repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ralph-group/pymeasure
|
pymeasure/experiment/experiment.py
|
1
|
9800
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger()
log.addHandler(logging.NullHandler())
try:
from IPython import display
except ImportError:
log.warning("IPython could not be imported")
from .results import unique_filename
from .config import get_config, set_mpl_rcparams
from pymeasure.log import setup_logging, console_log
from pymeasure.experiment import Results, Worker
from .parameters import Measurable
import time, signal
import numpy as np
import pandas as pd
import tempfile
import gc
# pcolor()/update_pcolor() below use pylab and seaborn; guard the imports like
# the IPython import above so the module still loads when they are unavailable.
try:
import pylab as pl
import seaborn as sns
except ImportError:
log.warning("pylab or seaborn could not be imported; pcolor plotting disabled")
def get_array(start, stop, step):
"""Returns a numpy array from start to stop"""
step = np.sign(stop - start) * abs(step)
return np.arange(start, stop + step, step)
def get_array_steps(start, stop, numsteps):
"""Returns a numpy array from start to stop in numsteps"""
return get_array(start, stop, (abs(stop - start) / numsteps))
def get_array_zero(maxval, step):
"""Returns a numpy array from 0 to maxval to -maxval to 0"""
return np.concatenate((np.arange(0, maxval, step), np.arange(maxval, -maxval, -step),
np.arange(-maxval, 0, step)))
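# Illustrative sketch (not part of the original module): what the sweep helpers
# above return for a hypothetical 0..1 ramp in 0.25 steps.
#
#     get_array(0, 1, 0.25)      # -> [0.0, 0.25, 0.5, 0.75, 1.0]
#     get_array(1, 0, 0.25)      # -> [1.0, 0.75, 0.5, 0.25, 0.0] (step sign is inferred)
#     get_array_steps(0, 1, 4)   # -> same array, specified by number of steps
#     get_array_zero(1, 0.25)    # -> ramps 0 -> 1 -> -1 -> back toward 0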
def create_filename(title):
"""
Create a new filename according to the style defined in the config file.
If no config is specified, create a temporary file.
"""
config = get_config()
if 'Filename' in config._sections.keys():
filename = unique_filename(suffix='_%s' % title, **config._sections['Filename'])
else:
filename = tempfile.mktemp()
return filename
class Experiment(object):
""" Class which starts logging and creates/runs the results and worker processes.
.. code-block:: python
procedure = Procedure()
experiment = Experiment(title, procedure)
experiment.start()
experiment.plot_live('x', 'y', style='.-')
for a multi-subplot graph:
import pylab as pl
ax1 = pl.subplot(121)
experiment.plot('x','y',ax=ax1)
ax2 = pl.subplot(122)
experiment.plot('x','z',ax=ax2)
experiment.plot_live()
:param title: The experiment title
:param procedure: The procedure object
:param analyse: Post-analysis function, which takes a pandas dataframe as input and
returns it with added (analysed) columns. The analysed results are accessible via
experiment.data, as opposed to experiment.results.data for the 'raw' data.
:param _data_timeout: Time limit for how long live plotting should wait for datapoints.
"""
def __init__(self, title, procedure, analyse=(lambda x: x)):
self.title = title
self.procedure = procedure
self.measlist = []
self.port = 5888
self.plots = []
self.figs = []
self._data = []
self.analyse = analyse
self._data_timeout = 10
config = get_config()
set_mpl_rcparams(config)
if 'Logging' in config._sections.keys():
self.scribe = setup_logging(log, **config._sections['Logging'])
else:
self.scribe = console_log(log)
self.scribe.start()
self.filename = create_filename(self.title)
log.info("Using data file: %s" % self.filename)
self.results = Results(self.procedure, self.filename)
log.info("Set up Results")
self.worker = Worker(self.results, self.scribe.queue, logging.DEBUG)
log.info("Create worker")
def start(self):
"""Start the worker"""
log.info("Starting worker...")
self.worker.start()
@property
def data(self):
"""Data property which returns analysed data, if an analyse function
is defined, otherwise returns the raw data."""
self._data = self.analyse(self.results.data.copy())
return self._data
def wait_for_data(self):
"""Wait for the data attribute to fill with datapoints."""
t = time.time()
while self.data.empty:
time.sleep(.1)
if (time.time() - t) > self._data_timeout:
log.warning('Timeout, no data received for liveplot')
return False
return True
def plot_live(self, *args, **kwargs):
"""Live plotting loop for jupyter notebook, which automatically updates
(an) in-line matplotlib graph(s). Will create a new plot as specified by input
arguments, or will update (an) existing plot(s)."""
if self.wait_for_data():
if not (self.plots):
self.plot(*args, **kwargs)
while not self.worker.should_stop():
self.update_plot()
display.clear_output(wait=True)
if self.worker.is_alive():
self.worker.terminate()
self.scribe.stop()
def plot(self, *args, **kwargs):
"""Plot the results from the experiment.data pandas dataframe. Store the
plots in a plots list attribute."""
if self.wait_for_data():
kwargs['title'] = self.title
ax = self.data.plot(*args, **kwargs)
self.plots.append({'type': 'plot', 'args': args, 'kwargs': kwargs, 'ax': ax})
if ax.get_figure() not in self.figs:
self.figs.append(ax.get_figure())
self._user_interrupt = False
def clear_plot(self):
"""Clear the figures and plot lists."""
for fig in self.figs:
fig.clf()
# self.plots holds plot-description dicts, not figure handles; close the
# underlying matplotlib figures instead of calling .close() on a dict.
for fig in self.figs:
pl.close(fig)
self.figs = []
self.plots = []
gc.collect()
def update_plot(self):
"""Update the plots in the plots list with new data from the experiment.data
pandas dataframe."""
try:
tasks = []
self.data
for plot in self.plots:
ax = plot['ax']
if plot['type'] == 'plot':
x, y = plot['args'][0], plot['args'][1]
if type(y) == str:
y = [y]
for yname, line in zip(y, ax.lines):
self.update_line(ax, line, x, yname)
if plot['type'] == 'pcolor':
x, y, z = plot['x'], plot['y'], plot['z']
self.update_pcolor(ax, x, y, z)
display.clear_output(wait=True)
display.display(*self.figs)
time.sleep(0.1)
except KeyboardInterrupt:
display.clear_output(wait=True)
display.display(*self.figs)
self._user_interrupt = True
def pcolor(self, xname, yname, zname, *args, **kwargs):
"""Plot the results from the experiment.data pandas dataframe in a pcolor graph.
Store the plots in a plots list attribute."""
title = self.title
x, y, z = self._data[xname], self._data[yname], self._data[zname]
shape = (len(y.unique()), len(x.unique()))
diff = shape[0] * shape[1] - len(z)
Z = np.concatenate((z.values, np.zeros(diff))).reshape(shape)
df = pd.DataFrame(Z, index=y.unique(), columns=x.unique())
# TODO: Remove seaborn dependencies
ax = sns.heatmap(df)
pl.title(title)
pl.xlabel(xname)
pl.ylabel(yname)
ax.invert_yaxis()
pl.plt.show()
self.plots.append(
{'type': 'pcolor', 'x': xname, 'y': yname, 'z': zname, 'args': args, 'kwargs': kwargs,
'ax': ax})
if ax.get_figure() not in self.figs:
self.figs.append(ax.get_figure())
def update_pcolor(self, ax, xname, yname, zname):
"""Update a pcolor graph with new data."""
x, y, z = self._data[xname], self._data[yname], self._data[zname]
shape = (len(y.unique()), len(x.unique()))
diff = shape[0] * shape[1] - len(z)
Z = np.concatenate((z.values, np.zeros(diff))).reshape(shape)
df = pd.DataFrame(Z, index=y.unique(), columns=x.unique())
cbar_ax = ax.get_figure().axes[1]
# TODO: Remove seaborn dependencies
sns.heatmap(df, ax=ax, cbar_ax=cbar_ax)
ax.set_xlabel(xname)
ax.set_ylabel(yname)
ax.invert_yaxis()
def update_line(self, ax, hl, xname, yname):
"""Update a line in a matplotlib graph with new data."""
del hl._xorig, hl._yorig
hl.set_xdata(self._data[xname])
hl.set_ydata(self._data[yname])
ax.relim()
ax.autoscale()
gc.collect()
def __del__(self):
self.scribe.stop()
if self.worker.is_alive():
self.worker.recorder_queue.put(None)
self.worker.monitor_queue.put(None)
self.worker.stop()
|
mit
|
bnaul/scikit-learn
|
sklearn/datasets/_twenty_newsgroups.py
|
2
|
17184
|
"""Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed dataset size is around 14 MB. Once uncompressed, the
train set is 52 MB and the test set is 34 MB.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
from os.path import dirname, join
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
import joblib
from . import get_data_home
from . import load_files
from ._base import _pkl_filepath
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..feature_extraction.text import CountVectorizer
from .. import preprocessing
from ..utils import check_random_state, Bunch
from ..utils.validation import _deprecate_positional_args
logger = logging.getLogger(__name__)
# The original data can be found at:
# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz
ARCHIVE = RemoteFileMetadata(
filename='20news-bydate.tar.gz',
url='https://ndownloader.figshare.com/files/5975967',
checksum=('8f1b2514ca22a5ade8fbb9cfa5727df9'
'5fa587f4c87b786e15c759fa66d95610'))
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def _download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=target_dir)
logger.debug("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
Parameters
----------
text : string
The text from which to remove the headers.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:').
Parameters
----------
text : string
The text from which to remove quoted lines.
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
Parameters
----------
text : string
The text from which to remove the signature block.
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
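# Illustrative sketch (not part of scikit-learn): how the three filters above
# transform a made-up post; the post text here is purely an assumption for
# demonstration.
#
#     post = ("From: someone@example.com\nSubject: demo\n\n"
#             "alice writes:\n> old quoted text\nnew reply text\n\n--\nsig line")
#     strip_newsgroup_header(post)   # drops everything before the first blank line
#     strip_newsgroup_quoting(post)  # drops the 'writes:' line and the '>' line
#     strip_newsgroup_footer(post)   # drops the trailing '--' / signature block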
@_deprecate_positional_args
def fetch_20newsgroups(*, data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True, return_X_y=False):
"""Load the filenames and data from the 20 newsgroups dataset \
(classification).
Download it if necessary.
================= ==========
Classes 20
Samples total 18846
Dimensionality 1
Features text
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
data_home : str, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
categories : array-like, dtype=str or unicode, default=None
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, default=True
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, default=42
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns `(data.data, data.target)` instead of a Bunch
object.
.. versionadded:: 0.22
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : list, length [n_samples]
The data list to learn.
target: array, shape [n_samples]
The target labels.
filenames: list, length [n_samples]
The path to the location of the data.
DESCR: str
The full description of the dataset.
target_names: list, length [n_classes]
The names of target classes.
(data, target) : tuple if `return_X_y=True`
.. versionadded:: 0.22
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. "
"This may take a few minutes.")
cache = _download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'twenty_newsgroups.rst')) as rst_file:
fdescr = rst_file.read()
data.DESCR = fdescr
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
return data.data, data.target
return data
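# Usage sketch (illustrative, not part of this module): fetching a filtered
# training subset; the category names are assumed from the standard dataset.
#
#     from sklearn.datasets import fetch_20newsgroups
#     bunch = fetch_20newsgroups(subset='train',
#                                categories=['rec.autos', 'sci.space'],
#                                remove=('headers', 'footers', 'quotes'))
#     bunch.target_names          # ['rec.autos', 'sci.space']
#     len(bunch.data)             # number of posts in these two categories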
@_deprecate_positional_args
def fetch_20newsgroups_vectorized(*, subset="train", remove=(), data_home=None,
download_if_missing=True, return_X_y=False,
normalize=True):
"""Load the 20 newsgroups dataset and vectorize it into token counts \
(classification).
Download it if necessary.
This is a convenience function; the transformation is done using the
default settings for
:class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom
:class:`~sklearn.feature_extraction.text.CountVectorizer`,
:class:`~sklearn.feature_extraction.text.HashingVectorizer`,
:class:`~sklearn.feature_extraction.text.TfidfTransformer` or
:class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
The resulting counts are normalized using
:func:`sklearn.preprocessing.normalize` unless normalize is set to False.
================= ==========
Classes 20
Samples total 18846
Dimensionality 130107
Features real
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
data_home : str, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
normalize : bool, default=True
If True, normalizes each document's feature vector to unit norm using
:func:`sklearn.preprocessing.normalize`.
.. versionadded:: 0.22
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: sparse matrix, shape [n_samples, n_features]
The data matrix to learn.
target: array, shape [n_samples]
The target labels.
target_names: list, length [n_classes]
The names of target classes.
DESCR: str
The full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove,
download_if_missing=download_if_missing)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove,
download_if_missing=download_if_missing)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
if normalize:
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
preprocessing.normalize(X_train, copy=False)
preprocessing.normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'twenty_newsgroups.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
target_names=target_names,
DESCR=fdescr)
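# Usage sketch (illustrative): the vectorized loader returns a sparse count
# matrix plus integer labels; shapes assume the standard "by date" train split.
#
#     from sklearn.datasets import fetch_20newsgroups_vectorized
#     X, y = fetch_20newsgroups_vectorized(subset='train', return_X_y=True)
#     X.shape                     # (11314, 130107), scipy CSR matrix
#     y.shape                     # (11314,)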
|
bsd-3-clause
|
moutai/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
24
|
6079
|
"""Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
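# Minimal usage sketch (illustrative, mirrors what the tests below exercise):
#
#     gpc = GaussianProcessClassifier(kernel=fixed_kernel).fit(X, y)
#     gpc.predict(X2)             # hard class labels
#     gpc.predict_proba(X2)       # per-class probabilities, each row sums to 1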
def test_predict_consistent():
""" Check binary predict decision has also predicted probability above 0.5.
"""
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
""" Test that GPC can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
""" Test GPC for multi-class classification problems. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
""" Test that multi-class GPC produces identical results with n_jobs>1. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
|
bsd-3-clause
|
soerendip42/rdkit
|
Contrib/pzc/p_con.py
|
3
|
48985
|
# coding=utf-8
# Copyright (c) 2014 Merck KGaA
from __future__ import print_function
import os,re,gzip,json,requests,sys, optparse,csv
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import SDWriter
from rdkit.Chem import Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
from scipy import interp
from scipy import stats
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_score,recall_score
from sklearn import preprocessing
import cPickle
from pickle import Unpickler
import numpy as np
import math
from pylab import *
from sklearn.metrics import make_scorer
kappa_template = '''\
%(kind)s Kappa Coefficient
--------------------------------
Kappa %(kappa)6.4f
ASE %(std_kappa)6.4f
%(alpha_ci)s%% Lower Conf Limit %(kappa_low)6.4f
%(alpha_ci)s%% Upper Conf Limit %(kappa_upp)6.4f
Test of H0: %(kind)s Kappa = 0
ASE under H0 %(std_kappa0)6.4f
Z %(z_value)6.4f
One-sided Pr > Z %(pvalue_one_sided)6.4f
Two-sided Pr > |Z| %(pvalue_two_sided)6.4f
'''
'''
Weighted Kappa Coefficient
--------------------------------
Weighted Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010
'''
def int_ifclose(x, dec=1, width=4):
'''Helper function for creating a result string for an int or float.
Only dec=1 and width=4 are implemented.
Parameters
----------
x : int or float
value to format
dec : 1
number of decimals to print if x is not an integer
width : 4
width of string
Returns
-------
xint : int or float
x is converted to int if it is within 1e-14 of an integer
x_string : str
x formatted as string, either '%4d' or '%4.1f'
'''
xint = int(round(x))
if np.max(np.abs(xint - x)) < 1e-14:
return xint, '%4d' % xint
else:
return x, '%4.1f' % x
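# Example (illustrative): int_ifclose(95.0) -> (95, '  95') because 95.0 is
# within 1e-14 of an integer, while int_ifclose(2.5) -> (2.5, ' 2.5').
# KappaResults below uses this to format the confidence-limit percentage.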
class KappaResults(dict):
def __init__(self, **kwds):
self.update(kwds)
if not 'alpha' in self:
self['alpha'] = 0.025
self['alpha_ci'] = int_ifclose(100 - 0.025 * 200)[1]
self['std_kappa'] = np.sqrt(self['var_kappa'])
self['std_kappa0'] = np.sqrt(self['var_kappa0'])
self['z_value'] = self['kappa'] / self['std_kappa0']
self['pvalue_one_sided'] = stats.norm.sf(self['z_value'])
self['pvalue_two_sided'] = self['pvalue_one_sided'] * 2
delta = stats.norm.isf(self['alpha']) * self['std_kappa']
self['kappa_low'] = self['kappa'] - delta
self['kappa_upp'] = self['kappa'] + delta
def __str__(self):
return kappa_template % self
def cohens_kappa(table, weights=None, return_results=True, wt=None):
'''Compute Cohen's kappa with variance and equal-zero test
Parameters
----------
table : array_like, 2-Dim
square array with results of two raters, one rater in rows, second
rater in columns
weights : array_like
The interpretation of weights depends on the wt argument.
If both are None, then the simple kappa is computed.
see wt for the case when wt is not None
If weights is two dimensional, then it is directly used as a weight
matrix. For computing the variance of kappa, the maximum of the
weights is assumed to be smaller or equal to one.
TODO: fix conflicting definitions in the 2-Dim case for
wt : None or string
If wt and weights are None, then the simple kappa is computed.
If wt is given, but weights is None, then the weights are set to
be [0, 1, 2, ..., k].
If weights is a one-dimensional array, then it is used to construct
the weight matrix given the following options.
wt in ['linear', 'ca' or None] : use linear weights, Cicchetti-Allison
actual weights are linear in the score "weights" difference
wt in ['quadratic', 'fc'] : use quadratic weights, Fleiss-Cohen
actual weights are squared in the score "weights" difference
wt = 'toeplitz' : weight matrix is constructed as a toeplitz matrix
from the one dimensional weights.
return_results : bool
If True (default), then an instance of KappaResults is returned.
If False, then only kappa is computed and returned.
Returns
-------
results or kappa
If return_results is True (default), then a results instance with all
statistics is returned
If return_results is False, then only kappa is calculated and returned.
Notes
-----
There are two conflicting definitions of the weight matrix, Wikipedia
versus SAS manual. However, the computation is invariant to rescaling
of the weights matrix, so there is no difference in the results.
Weights for 'linear' and 'quadratic' are interpreted as scores for the
categories, the weights in the computation are based on the pairwise
difference between the scores.
Weights for 'toeplitz' are interpreted as a weighted distance. The distance
only depends on how many levels apart two entries in the table are but
not on the levels themselves.
example:
weights = '0, 1, 2, 3' and wt is either linear or toeplitz means that the
weighting only depends on the simple distance of levels.
weights = '0, 0, 1, 1' and wt = 'linear' means that the first two levels
are zero distance apart and the same for the last two levels. This is
the same as forming two aggregated levels by merging the first two and
the last two levels, respectively.
weights = [0, 1, 2, 3] and wt = 'quadratic' is the same as squaring these
weights and using wt = 'toeplitz'.
References
----------
Wikipedia
SAS Manual
'''
table = np.asarray(table, float) #avoid integer division
agree = np.diag(table).sum()
nobs = table.sum()
probs = table / nobs
freqs = probs #TODO: rename to use freqs instead of probs for observed
probs_diag = np.diag(probs)
freq_row = table.sum(1) / nobs
freq_col = table.sum(0) / nobs
prob_exp = freq_col * freq_row[:, None]
assert np.allclose(prob_exp.sum(), 1)
#print prob_exp.sum()
agree_exp = np.diag(prob_exp).sum() #need for kappa_max
if weights is None and wt is None:
kind = 'Simple'
kappa = (agree / nobs - agree_exp) / (1 - agree_exp)
if return_results:
#variance
term_a = probs_diag * (1 - (freq_row + freq_col) * (1 - kappa))**2
term_a = term_a.sum()
term_b = probs * (freq_col[:, None] + freq_row)**2
d_idx = np.arange(table.shape[0])
term_b[d_idx, d_idx] = 0 #set diagonal to zero
term_b = (1 - kappa)**2 * term_b.sum()
term_c = (kappa - agree_exp * (1-kappa))**2
var_kappa = (term_a + term_b - term_c) / (1 - agree_exp)**2 / nobs
#term_c = freq_col * freq_row[:, None] * (freq_col + freq_row[:,None])
term_c = freq_col * freq_row * (freq_col + freq_row)
var_kappa0 = (agree_exp + agree_exp**2 - term_c.sum())
var_kappa0 /= (1 - agree_exp)**2 * nobs
else:
if weights is None:
weights = np.arange(table.shape[0])
#weights follows the Wikipedia definition, not the SAS, which is 1 -
kind = 'Weighted'
weights = np.asarray(weights, float)
if weights.ndim == 1:
if wt in ['ca', 'linear', None]:
weights = np.abs(weights[:, None] - weights) / \
(weights[-1] - weights[0])
elif wt in ['fc', 'quadratic']:
weights = (weights[:, None] - weights)**2 / \
(weights[-1] - weights[0])**2
elif wt == 'toeplitz':
#assume toeplitz structure
from scipy.linalg import toeplitz
#weights = toeplitz(np.arange(table.shape[0]))
weights = toeplitz(weights)
else:
raise ValueError('wt option is not known')
else:
rows, cols = table.shape
if (table.shape != weights.shape):
raise ValueError('weights are not square')
#this is formula from Wikipedia
kappa = 1 - (weights * table).sum() / nobs / (weights * prob_exp).sum()
#TODO: add var_kappa for weighted version
if return_results:
var_kappa = np.nan
var_kappa0 = np.nan
#switch to SAS manual weights, problem if user specifies weights
#w is negative in some examples,
#but weights is scale invariant in examples and rough check of source
w = 1. - weights
w_row = (freq_col * w).sum(1)
w_col = (freq_row[:, None] * w).sum(0)
agree_wexp = (w * freq_col * freq_row[:, None]).sum()
term_a = freqs * (w - (w_col + w_row[:, None]) * (1 - kappa))**2
fac = 1. / ((1 - agree_wexp)**2 * nobs)
var_kappa = term_a.sum() - (kappa - agree_wexp * (1 - kappa))**2
var_kappa *= fac
freqse = freq_col * freq_row[:, None]
var_kappa0 = (freqse * (w - (w_col + w_row[:, None]))**2).sum()
var_kappa0 -= agree_wexp**2
var_kappa0 *= fac
kappa_max = (np.minimum(freq_row, freq_col).sum() - agree_exp) / \
(1 - agree_exp)
if return_results:
res = KappaResults( kind=kind,
kappa=kappa,
kappa_max=kappa_max,
weights=weights,
var_kappa=var_kappa,
var_kappa0=var_kappa0
)
return res
else:
return kappa
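# Worked example (illustrative, not part of the original script): two raters
# label 50 items with the agreement table [[20, 5], [10, 15]].
#
#     observed agreement  p_o = (20 + 15) / 50                 = 0.70
#     expected agreement  p_e = (25 * 30 + 25 * 20) / 50 ** 2  = 0.50
#     simple kappa            = (p_o - p_e) / (1 - p_e)        = 0.40
#
#     res = cohens_kappa([[20, 5], [10, 15]])
#     round(res['kappa'], 2)      # 0.4; str(res) renders the kappa_template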
def to_table(data, bins=None):
'''convert raw data with shape (subject, rater) to (rater1, rater2)
brings data into correct format for cohens_kappa
Parameters
----------
data : array_like, 2-Dim
data containing category assignment with subjects in rows and raters
in columns.
bins : None, int or tuple of array_like
If None, then the data is converted to integer categories,
0,1,2,...,n_cat-1. Because of the relabeling only category levels
with non-zero counts are included.
If this is an integer, then the category levels in the data are already
assumed to be in integers, 0,1,2,...,n_cat-1. In this case, the
returned array may contain columns with zero count, if no subject
has been categorized with this level.
If bins is a tuple of two array_like, then the bins are directly used
by ``numpy.histogramdd``. This is useful if we want to merge categories.
Returns
-------
arr : nd_array, (n_cat, n_cat)
Contingency table that contains counts of category level with rater1
in rows and rater2 in columns.
Notes
-----
no NaN handling, delete rows with missing values
This works also for more than two raters. In that case the dimension of
the resulting contingency table is the same as the number of raters
instead of 2-dimensional.
'''
data = np.asarray(data)
n_rows, n_cols = data.shape
if bins is None:
#I could add int conversion (reverse_index) to np.unique
cat_uni, cat_int = np.unique(data.ravel(), return_inverse=True)
n_cat = len(cat_uni)
data_ = cat_int.reshape(data.shape)
bins_ = np.arange(n_cat+1) - 0.5
#alternative implementation with double loop
#tt = np.asarray([[(x == [i,j]).all(1).sum() for j in cat_uni]
# for i in cat_uni] )
#other altervative: unique rows and bincount
elif np.isscalar(bins):
bins_ = np.arange(bins+1) - 0.5
data_ = data
else:
bins_ = bins
data_ = data
tt = np.histogramdd(data_, (bins_,)*n_cols)
return tt[0], bins_
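# Example (illustrative): three subjects rated by two raters.
#
#     data = [[0, 0],
#             [1, 1],
#             [1, 0]]
#     table, bins = to_table(data)
#     # table == [[1., 0.],
#     #           [1., 1.]]   (rater1 in rows, rater2 in columns)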
class p_con:
"""Class to create Models to classify Molecules active or inactive
using threshold for value in training-data"""
def __init__(self,acc_id=None,proxy={}):
"""Constructor to initialize Object, use proxy if neccessary"""
self.request_data={"acc_id":acc_id,"proxy":proxy}
self.acc_id = acc_id
self.proxy = proxy
self.model = []
self.verbous = False
def __str__(self):
"""String-Representation for Object"""
self.request_data["cmpd_count"] = len(self.sd_entries)
retString = ""
for key in self.request_data.keys():
retString += "%s: %s\n" % (key,self.request_data[key])
return retString.rstrip()
def step_0_get_chembl_data(self):
"""Download Compound-Data for self.acc_id, these are available in self.sd_entries afterwards"""
def looks_like_number(x):
"""Check for proper Float-Value"""
try:
float(x)
return True
except ValueError:
return False
if self.acc_id.find("CHEMBL") == -1:
self.target_data = requests.get("https://www.ebi.ac.uk/chemblws/targets/uniprot/{}.json".format(self.acc_id),proxies=self.proxy).json()
else:
self.target_data = {}
self.target_data['target'] = {}
self.target_data['target']['chemblId'] = self.acc_id
self.chembl_id = self.target_data['target']['chemblId']
self.request_data["chembl_id"] = self.target_data['target']['chemblId']
# print self.target_data
self.bioactivity_data = requests.get("https://www.ebi.ac.uk/chemblws/targets/{}/bioactivities.json".format(self.target_data['target']['chemblId']),proxies=self.proxy).json()
ic50_skip=0
ki_skip=0
inhb_skip=0
count=0
non_homo=0
self.dr={}
i = 0
x = len(self.bioactivity_data['bioactivities'] )
for bioactivity in [record for record in self.bioactivity_data['bioactivities'] if looks_like_number(record['value']) ] :
if i%100 == 0:
sys.stdout.write('\r' + str(i) + '/' +str(x) + ' > <\b\b\b\b\b\b\b\b\b\b\b')
elif (i%100)%10==0:
sys.stdout.write('|')
sys.stdout.flush()
i += 1
# if i > 5000: break
if bioactivity['organism'] != 'Homo sapiens':
non_homo+=1
continue
if re.search('IC50', bioactivity['bioactivity_type']):
if bioactivity['units'] != 'nM':
ic50_skip+=1
continue
elif re.search('Ki', bioactivity['bioactivity_type']):
ki_skip+=1
continue
elif re.search('Inhibition', bioactivity['bioactivity_type']):
inhb_skip+=1
else:
continue
self.cmpd_data = requests.get("https://www.ebi.ac.uk/chemblws/compounds/{}.json".format(bioactivity['ingredient_cmpd_chemblid']),proxies=self.proxy).json()
my_smiles = self.cmpd_data['compound']['smiles']
bioactivity['Smiles']=my_smiles
self.dr[count] = bioactivity
count+=1
SDtags = self.dr[0].keys()
cpd_counter=0
self.sd_entries = []
for x in range(len(self.dr)):
entry = self.dr[x]
cpd = Chem.MolFromSmiles(str(entry['Smiles']))
AllChem.Compute2DCoords(cpd)
cpd.SetProp("_Name",str(cpd_counter))
cpd_counter += 1
for tag in SDtags: cpd.SetProp(str(tag),str(entry[tag]))
self.sd_entries.append(cpd)
return True
def step_1_keeplargestfrag(self):
"""remove all smaller Fragments per compound, just keep the largest"""
result=[]
for cpd in self.sd_entries:
fragments = Chem.GetMolFrags(cpd,asMols=True)
list_cpds_fragsize = []
for frag in fragments:
list_cpds_fragsize.append(frag.GetNumAtoms())
largest_frag_index = list_cpds_fragsize.index(max(list_cpds_fragsize))
largest_frag = fragments[largest_frag_index]
result.append(largest_frag)
self.sd_entries = result
return True
def step_2_remove_dupl(self):
"""remove duplicates from self.sd_entries"""
result = []
all_struct_dict = {}
for cpd in self.sd_entries:
Chem.RemoveHs(cpd)
cansmi = Chem.MolToSmiles(cpd,canonical=True)
if not cansmi in all_struct_dict.keys():
all_struct_dict[cansmi] = []
all_struct_dict[cansmi].append(cpd)
for entry in all_struct_dict.keys():
if len(all_struct_dict[entry])==1:
all_struct_dict[entry][0].SetProp('cansmirdkit',entry)
result.append(all_struct_dict[entry][0])
self.sd_entries=result
return True
def step_3_merge_IC50(self):
"""merge IC50 of duplicates into one compound using mean of all values if:
min(IC50) >= IC50_avg-3*IC50_stddev && max(IC50) <= IC50_avg+3*IC50_stddev && IC50_stddev <= IC50_avg"""
np_old_settings = np.seterr(invalid='ignore') #dirty way to ignore warnings from np.std
def get_mean_IC50(mol_list):
IC50 = 0
IC50_avg = 0
for bla in mol_list:
try:
IC50 += float(bla.GetProp("value"))
except:
print("no IC50 reported",bla.GetProp("_Name"))
IC50_avg = IC50 / len(mol_list)
return IC50_avg
def get_stddev_IC50(mol_list):
IC50_list = []
for mol in mol_list:
try:
IC50_list.append(round(float(mol.GetProp("value")),2))
except:
print("no IC50 reported",mol.GetProp("_Name"))
IC50_stddev = np.std(IC50_list,ddof=1)
return IC50_stddev,IC50_list
result = []
IC50_dict = {}
for cpd in self.sd_entries:
if not "cansmirdkit" in cpd.GetPropNames():
Chem.RemoveHs(cpd)
cansmi = Chem.MolToSmiles(cpd,canonical=True)
cpd.SetProp('cansmirdkit',cansmi)
cansmi = str(cpd.GetProp("cansmirdkit"))
IC50_dict[cansmi]={}
for cpd in self.sd_entries:
cansmi = str(cpd.GetProp("cansmirdkit"))
try:
IC50_dict[cansmi].append(cpd)
except:
IC50_dict[cansmi] = [cpd]
for entry in IC50_dict:
IC50_avg = str(get_mean_IC50(IC50_dict[entry]))
IC50_stddev,IC50_list = get_stddev_IC50(IC50_dict[entry])
IC50_dict[entry][0].SetProp("value_stddev",str(IC50_stddev))
IC50_dict[entry][0].SetProp("value",IC50_avg)
minimumvalue = float(IC50_avg)-3*float(IC50_stddev)
maximumvalue = float(IC50_avg)+3*float(IC50_stddev)
if round(IC50_stddev,1) == 0.0:
result.append(IC50_dict[entry][0])
elif IC50_stddev > float(IC50_avg):
runawaylist = []
for e in IC50_dict[entry]:
runawaylist.append(e.GetProp("_Name"))
print("stddev larger than mean", runawaylist, IC50_list, IC50_avg,IC50_stddev)
elif np.min(IC50_list) < minimumvalue or np.max(IC50_list) > maximumvalue:
pass
else:
result.append(IC50_dict[entry][0])
self.sd_entries=result
np.seterr(over=np_old_settings['over'],divide=np_old_settings['divide'],invalid=np_old_settings['invalid'],under=np_old_settings['under'])
return True
def step_4_set_TL(self,threshold,ic50_tag="value"):
"""set Property "TL"(TrafficLight) for each compound:
if ic50_tag (default:"value") > threshold: TL = 0, else 1"""
result = []
i,j = 0,0
for cpd in self.sd_entries:
if float(cpd.GetProp(ic50_tag))> float(threshold):
cpd.SetProp('TL','0')
i += 1
else:
cpd.SetProp('TL','1')
j += 1
result.append(cpd)
self.sd_entries = result
if self.verbous: print("## act: %d, inact: %d" % (j,i))
return True
def step_5_remove_descriptors(self):
"""remove list of Properties from each compound (hardcoded)
which would corrupt process of creating Prediction-Models"""
sd_tags = ['activity__comment','alogp','assay__chemblid','assay__description','assay__type','bioactivity__type','activity_comment','assay_chemblid','assay_description','assay_type','bioactivity_type','cansmirdkit','ingredient__cmpd__chemblid','ingredient_cmpd_chemblid','knownDrug','medChemFriendly','molecularFormula','name__in__reference','name_in_reference','numRo5Violations','operator','organism','parent__cmpd__chemblid','parent_cmpd_chemblid','passesRuleOfThree','preferredCompoundName','reference','rotatableBonds','smiles','Smiles','stdInChiKey','synonyms','target__chemblid','target_chemblid','target__confidence','target__name','target_confidence','target_name','units','value_avg','value_stddev'] + ['value']
result = []
for mol in self.sd_entries:
properties = mol.GetPropNames()
for tag in properties:
if tag in sd_tags: mol.ClearProp(tag)
result.append(mol)
self.sd_entries = result
return True
def step_6_calc_descriptors(self):
"""calculate descriptors for each compound, according to Descriptors._descList"""
nms=[x[0] for x in Descriptors._descList]
calc = MoleculeDescriptors.MolecularDescriptorCalculator(nms)
for i in range(len(self.sd_entries)):
descrs = calc.CalcDescriptors(self.sd_entries[i])
for j in range(len(descrs)):
self.sd_entries[i].SetProp(str(nms[j]),str(descrs[j]))
return True
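# Sketch (illustrative): the same descriptor calculation on a single molecule
# outside the class; the SMILES string here is an arbitrary example.
#
#     from rdkit import Chem
#     from rdkit.Chem import Descriptors
#     from rdkit.ML.Descriptors import MoleculeDescriptors
#     names = [d[0] for d in Descriptors._descList]
#     calc = MoleculeDescriptors.MolecularDescriptorCalculator(names)
#     mol = Chem.MolFromSmiles('c1ccccc1O')    # phenol
#     values = calc.CalcDescriptors(mol)       # one float per descriptor name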
def step_7_train_models(self):
"""train models according to trafficlight using sklearn.ensamble.RandomForestClassifier
self.model contains up to 10 models afterwards, use save_model_info(type) to create csv or html
containing data for each model"""
title_line = ["#","accuracy","MCC","precision","recall","f1","auc","kappa","prevalence","bias","pickel-File"]
self.csv_text= [title_line]
TL_list = []
property_list_list = []
directory = os.getcwd().split("/")[-2:]
dir_string = ';'.join(directory)
for cpd in self.sd_entries:
property_list = []
property_name_list = []
prop_name = cpd.GetPropNames()
for property in prop_name:
if property not in ['TL','value']:
try:
f = float(cpd.GetProp(property))
if math.isnan(f) or math.isinf(f):
print("invalid: %s" % property)
except ValueError:
print("valerror: %s" % property)
continue
property_list.append(f)
property_name_list.append(property)
elif property == 'TL':
TL_list.append(int(cpd.GetProp(property)))
else:
print(property)
pass
property_list_list.append(property_list)
dataDescrs_array = np.asarray(property_list_list)
dataActs_array = np.array(TL_list)
for randomseedcounter in range(1,11):
if self.verbous:
print("################################")
print("try to calculate seed %d" % randomseedcounter)
X_train,X_test,y_train,y_test = cross_validation.train_test_split(dataDescrs_array,dataActs_array,test_size=.4,random_state=randomseedcounter)
# try:
clf_RF = RandomForestClassifier(n_estimators=100,random_state=randomseedcounter)
clf_RF = clf_RF.fit(X_train,y_train)
cv_counter = 5
scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='accuracy')
accuracy_CV = round(scores.mean(),3)
accuracy_std_CV = round(scores.std(),3)
calcMCC = make_scorer(metrics.matthews_corrcoef,greater_is_better=True,needs_threshold=False)
scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring=calcMCC)
MCC_CV = round(scores.mean(),3)
MCC_std_CV = round(scores.std(),3)
scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='f1')
scores_rounded = [round(x,3) for x in scores]
f1_CV = round(scores.mean(),3)
f1_std_CV = round(scores.std(),3)
scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='precision')
scores_rounded = [round(x,3) for x in scores]
precision_CV = round(scores.mean(),3)
precision_std_CV = round(scores.std(),3)
scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='recall')
scores_rounded = [round(x,3) for x in scores]
recall_CV = round(scores.mean(),3)
recall_std_CV = round(scores.std(),3)
scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='roc_auc')
scores_rounded = [round(x,3) for x in scores]
auc_CV = round(scores.mean(),3)
auc_std_CV = round(scores.std(),3)
y_predict = clf_RF.predict(X_test)
conf_matrix = metrics.confusion_matrix(y_test,y_predict)
# coh_kappa = cohenskappa.cohens_kappa(conf_matrix)
coh_kappa = cohens_kappa(conf_matrix)
kappa = round(coh_kappa['kappa'],3)
kappa_stdev = round(coh_kappa['std_kappa'],3)
tp = conf_matrix[0][0]
tn = conf_matrix[1][1]
fp = conf_matrix[1][0]
fn = conf_matrix[0][1]
n = tn+fp
p = tp+fn
kappa_prevalence = round(float(abs(tp-tn))/float(n),3)
kappa_bias = round(float(abs(fp-fn))/float(n),3)
if self.verbous:
print("test:")
print("\tpos\tneg")
print("true\t%d\t%d" % (tp,tn))
print("false\t%d\t%d" % (fp,fn))
print(conf_matrix)
print("\ntrain:")
y_predict2 = clf_RF.predict(X_train)
conf_matrix2 = metrics.confusion_matrix(y_train,y_predict2)
tp2 = conf_matrix2[0][0]
tn2 = conf_matrix2[1][1]
fp2 = conf_matrix2[1][0]
fn2 = conf_matrix2[0][1]
print("\tpos\tneg")
print("true\t%d\t%d" % (tp2,tn2))
print("false\t%d\t%d" % (fp2,fn2))
print(conf_matrix2)
result_string_cut = [randomseedcounter,
str(accuracy_CV)+"_"+str(accuracy_std_CV),
str(MCC_CV)+"_"+str(MCC_std_CV),
str(precision_CV)+"_"+str(precision_std_CV),
str(recall_CV)+"_"+str(recall_std_CV),
str(f1_CV)+"_"+str(f1_std_CV),
str(auc_CV)+"_"+str(auc_std_CV),
str(kappa)+"_"+str(kappa_stdev),
kappa_prevalence,kappa_bias,"model_file.pkl"]
self.model.append(clf_RF)
self.csv_text.append(result_string_cut)
# except Exception as e:
# print "got %d models" % len(self.model)
# print e
# sys.exit(-1)
# break
return True if len(self.model)>0 else False
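# Note (illustrative): sklearn.cross_validation, used above, was removed in
# scikit-learn 0.20; with a modern scikit-learn the equivalent calls live in
# sklearn.model_selection, e.g.
#
#     from sklearn.model_selection import train_test_split, cross_val_score
#     X_train, X_test, y_train, y_test = train_test_split(
#         dataDescrs_array, dataActs_array, test_size=0.4, random_state=1)
#     scores = cross_val_score(clf_RF, X_test, y_test, cv=5, scoring='accuracy')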
def save_model_info(self,outfile,mode="html"):
"""create html- or csv-File for models according to mode (default: "html")"""
if mode=="csv":
if not outfile.endswith(".csv"): outfile += ".csv"
csv_file = open(outfile,"wb")
csv_file_writer = csv.writer(csv_file,delimiter=";",quotechar=' ')
for line in self.csv_text: csv_file_writer.writerow(line)
csv_file.flush()
csv_file.close()
elif mode=="html":
if not outfile.endswith(".html"): outfile += ".html"
def lines2list(lines):
return lines
def list2html(data,act,inact):
html_head = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title></title>
<style type="text/css">
table {
max-width: 100%;
background-color: transparent;
}
th {
text-align: left;
}
.table {
width: 100%;
margin-bottom: 20px;
}
.table > thead > tr > th,
.table > tbody > tr > th,
.table > tfoot > tr > th,
.table > thead > tr > td,
.table > tbody > tr > td,
.table > tfoot > tr > td {
padding: 8px;
line-height: 1.428571429;
vertical-align: top;
border-top: 1px solid #dddddd;
}
.table > thead > tr > th {
vertical-align: bottom;
border-bottom: 2px solid #dddddd;
}
.table > caption + thead > tr:first-child > th,
.table > colgroup + thead > tr:first-child > th,
.table > thead:first-child > tr:first-child > th,
.table > caption + thead > tr:first-child > td,
.table > colgroup + thead > tr:first-child > td,
.table > thead:first-child > tr:first-child > td {
border-top: 0;
}
.table > tbody + tbody {
border-top: 2px solid #dddddd;
}
.table .table {
background-color: #ffffff;
}
.table-condensed > thead > tr > th,
.table-condensed > tbody > tr > th,
.table-condensed > tfoot > tr > th,
.table-condensed > thead > tr > td,
.table-condensed > tbody > tr > td,
.table-condensed > tfoot > tr > td {
padding: 5px;
}
.table-bordered {
border: 1px solid #dddddd;
}
.table-bordered > thead > tr > th,
.table-bordered > tbody > tr > th,
.table-bordered > tfoot > tr > th,
.table-bordered > thead > tr > td,
.table-bordered > tbody > tr > td,
.table-bordered > tfoot > tr > td {
border: 1px solid #dddddd;
}
.table-bordered > thead > tr > th,
.table-bordered > thead > tr > td {
border-bottom-width: 2px;
}
.table-striped > tbody > tr:nth-child(odd) > td,
.table-striped > tbody > tr:nth-child(odd) > th {
background-color: #f9f9f9;
}
.table-hover > tbody > tr:hover > td,
.table-hover > tbody > tr:hover > th {
background-color: #f5f5f5;
}
table col[class*="col-"] {
position: static;
display: table-column;
float: none;
}
table td[class*="col-"],
table th[class*="col-"] {
display: table-cell;
float: none;
}
.table > thead > tr > .active,
.table > tbody > tr > .active,
.table > tfoot > tr > .active,
.table > thead > .active > td,
.table > tbody > .active > td,
.table > tfoot > .active > td,
.table > thead > .active > th,
.table > tbody > .active > th,
.table > tfoot > .active > th {
background-color: #f5f5f5;
}
.table-hover > tbody > tr > .active:hover,
.table-hover > tbody > .active:hover > td,
.table-hover > tbody > .active:hover > th {
background-color: #e8e8e8;
}
.table > thead > tr > .success,
.table > tbody > tr > .success,
.table > tfoot > tr > .success,
.table > thead > .success > td,
.table > tbody > .success > td,
.table > tfoot > .success > td,
.table > thead > .success > th,
.table > tbody > .success > th,
.table > tfoot > .success > th {
background-color: #dff0d8;
}
.table-hover > tbody > tr > .success:hover,
.table-hover > tbody > .success:hover > td,
.table-hover > tbody > .success:hover > th {
background-color: #d0e9c6;
}
.table > thead > tr > .danger,
.table > tbody > tr > .danger,
.table > tfoot > tr > .danger,
.table > thead > .danger > td,
.table > tbody > .danger > td,
.table > tfoot > .danger > td,
.table > thead > .danger > th,
.table > tbody > .danger > th,
.table > tfoot > .danger > th {
background-color: #f2dede;
}
.table-hover > tbody > tr > .danger:hover,
.table-hover > tbody > .danger:hover > td,
.table-hover > tbody > .danger:hover > th {
background-color: #ebcccc;
}
.table > thead > tr > .warning,
.table > tbody > tr > .warning,
.table > tfoot > tr > .warning,
.table > thead > .warning > td,
.table > tbody > .warning > td,
.table > tfoot > .warning > td,
.table > thead > .warning > th,
.table > tbody > .warning > th,
.table > tfoot > .warning > th {
background-color: #fcf8e3;
}
.table-hover > tbody > tr > .warning:hover,
.table-hover > tbody > .warning:hover > td,
.table-hover > tbody > .warning:hover > th {
background-color: #faf2cc;
}
@media (max-width: 767px) {
.table-responsive {
width: 100%;
margin-bottom: 15px;
overflow-x: scroll;
overflow-y: hidden;
border: 1px solid #dddddd;
-ms-overflow-style: -ms-autohiding-scrollbar;
-webkit-overflow-scrolling: touch;
}
.table-responsive > .table {
margin-bottom: 0;
}
.table-responsive > .table > thead > tr > th,
.table-responsive > .table > tbody > tr > th,
.table-responsive > .table > tfoot > tr > th,
.table-responsive > .table > thead > tr > td,
.table-responsive > .table > tbody > tr > td,
.table-responsive > .table > tfoot > tr > td {
white-space: nowrap;
}
.table-responsive > .table-bordered {
border: 0;
}
.table-responsive > .table-bordered > thead > tr > th:first-child,
.table-responsive > .table-bordered > tbody > tr > th:first-child,
.table-responsive > .table-bordered > tfoot > tr > th:first-child,
.table-responsive > .table-bordered > thead > tr > td:first-child,
.table-responsive > .table-bordered > tbody > tr > td:first-child,
.table-responsive > .table-bordered > tfoot > tr > td:first-child {
border-left: 0;
}
.table-responsive > .table-bordered > thead > tr > th:last-child,
.table-responsive > .table-bordered > tbody > tr > th:last-child,
.table-responsive > .table-bordered > tfoot > tr > th:last-child,
.table-responsive > .table-bordered > thead > tr > td:last-child,
.table-responsive > .table-bordered > tbody > tr > td:last-child,
.table-responsive > .table-bordered > tfoot > tr > td:last-child {
border-right: 0;
}
.table-responsive > .table-bordered > tbody > tr:last-child > th,
.table-responsive > .table-bordered > tfoot > tr:last-child > th,
.table-responsive > .table-bordered > tbody > tr:last-child > td,
.table-responsive > .table-bordered > tfoot > tr:last-child > td {
border-bottom: 0;
}
}
</style>
</head>
<body>
<p style="padding-left:10px;padding-top:10px;font-size:200%">Data for Models</p>
<p style="padding-left:10px;padding-right:10px;">"""
html_topPlot_start = """<table style="vertical-align:top; background-color=#CCCCCC">
<tr align="left" valign="top"><td><img src="pieplot.png"></td><td><H3>Distribution</H3><font color="#00C000">active %d</font><br><font color="#FF0000">inactive %d</td><td>"""
html_topPlot_bottom="""</td></tr></table>"""
html_tableStart="""<table class="table table-bordered table-condensed">
<thead>
<tr>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
</tr>
</thead>
<tbody>"""
html_tElements ="""
<tr bgcolor = "%s">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td><a href="%s">model.pkl</a></td>
</tr>"""
html_bottomPlot = """</tbody>
</table>
<img src="barplot.png"><br>"""
html_foot ="""
</p>
</body>
</html>"""
html_kappa_table_head="""<table class="table table-bordered table-condensed">
<thead>
<tr>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
</tr>
</thead>
<tbody>"""
html_kappa_table_element="""<tr bgcolor = "%s">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td><a href="%s">model.pkl</a></td>
</tr>"""
html_kappa_table_bottom="""</tbody>
</table>
<img src="barplot.png"><br>"""
best,worst = findBestWorst(data)
html = []
html.append(html_head)
html.append(html_topPlot_start % (act,inact))
html.append(html_topPlot_bottom)
html.append(html_tableStart % tuple(data[0]))
i = 0
for l in data[1:len(data)]:
l_replaced = []
for elem in l:
elem_string = str(elem)
if elem_string.find("pkl")==-1: l_replaced.append(elem_string.replace("_","±"))
else: l_replaced.append(elem_string)
c = ""
if i == best: c = "#9CC089"
if i == worst: c = "#FF3333"
html.append(html_tElements % tuple([c] + l_replaced))
i += 1
html.append(html_bottomPlot)
html.append(html_foot)
createBarPlot(data)
return html
def writeHtml(html,outf):
outf_h = open(outf,'w')
for block in html:
outf_h.write(block)
outf_h.flush()
outf_h.close()
return
def findBestWorst(data):
auc = [float(x[6].split("_")[0]) for x in data[1:]]
max_index,min_index = auc.index(max(auc)),auc.index(min(auc))
return (max_index,min_index)
def createPiePlot(cpds):
def getActInact(cpds):
act,inact=0,0
for cpd in cpds:
if int(cpd.GetProp('TL'))==0: inact+=1
else: act+=1
return act,inact
act_count,inact_count = getActInact(cpds)
print("act/inact from TL's %d/%d" % (act_count,inact_count))
fig = plt.figure(figsize=(2,2))
pie = plt.pie([inact_count,act_count],colors=('r','g'))
fig.savefig("pieplot.png",transparent=True)
return act_count,inact_count
def createBarPlot(data):
def getLists(data,col):
accList = []
errList = []
for x in data[1:]:
if x[col].find("_")==-1: continue
if x[col].find(".pkl")!=-1:continue
spl = x[col].split("_")
accList.append(float(spl[0]))
errList.append(float(spl[1]))
return accList,errList
def plotLists(cnt):
result=[]
clr = ['#DD1E2F','#EBB035','#06A2CB','#218559','#D0C6B1','#192823','#DDAACC']
# print ticks, list,errList,width
# print ticks
for i in range(1,cnt):
list,errList = getLists(data,i)
# print i,cnt,list,errList
result.append(ax.bar(ticks+width*i,list,width,color=clr[i-1],yerr=errList))
return result
fig,ax = plt.subplots()
fig.set_size_inches(15,6)
ticks = np.arange(0.0,12.0,1.2)
if len(self.model)==1: ticks = np.arange(0.0,1.0,1.5)
width = 0.15
plots = plotLists(8)
ax.set_xticks(ticks+0.75)
ax.set_xticklabels([str(x) for x in range(1,11,1)])
ax.set_ylabel("Accuracy")
ax.set_xlabel("# model")
ax.set_xlim(-0.3,14)
ax.set_ylim(-0.1,1.2)
ax.legend(tuple(plots),[x for x in data[0][1:8]],'upper right')
best,worst = findBestWorst(data)
if len(self.model)>1:
ax.annotate("best",xy=(ticks[best],0.85),xytext=(ticks[best]+0.25,1.1),color="green")
ax.annotate("worst",xy=(ticks[worst],0.85),xytext=(ticks[worst]+0.25,1.10),color="red")
fig.savefig("barplot.png",transparent=True)
return
act,inact = createPiePlot(self.sd_entries)
lines = self.csv_text
data = lines2list(lines)
html = list2html(data,act,inact)
writeHtml(html,outfile)
return True
def load_mols(self,sd_file):
"""load SD-File from .sdf, .sdf.gz or .sd.gz"""
if sd_file.endswith(".sdf.gz") or sd_file.endswith(".sd.gz"):
SDFile = Chem.ForwardSDMolSupplier(gzip.open(sd_file))
else:
SDFile = Chem.SDMolSupplier(sd_file)
self.sd_entries = [mol for mol in SDFile]
return True
def save_mols(self,outfile,gzip=True):
"""create SD-File of current molecules in self.sd_entries"""
sdw = Chem.SDWriter(outfile+".tmp")
for mol in self.sd_entries: sdw.write(mol)
sdw.flush()
sdw.close()
if not gzip:
os.rename(outfile+".tmp",outfile)
return
f_in = open(outfile+".tmp", 'rb')
f_out = gzip.open(outfile, 'wb')
f_out.writelines(f_in)
f_out.flush()
f_out.close()
f_in.close()
os.remove(outfile+".tmp")
return
def save_model(self,outfile,model_number=0):
"""save Model to file using cPickle.dump"""
        with open(outfile, "wb") as model_fh:
            cPickle.dump(self.model[model_number], model_fh)
return
def load_models(self,model_files):
"""load model or list of models into self.model"""
if type(model_files)==str: model_files = [model_files]
i = 0
for mod_file in model_files:
            model = open(mod_file, 'rb')
unPickled = Unpickler(model)
clf_RF = unPickled.load()
self.model.append(clf_RF)
model.close()
i += 1
return i
def predict(self,model_number):
"""try to predict activity of compounds using giving model-Number"""
if len(self.model)<=model_number:
sys.stderr.write("\nModel-Number %d doesn't exist, there are just %d Models\n" % (model_number,len(self.model)))
sys.exit(-1)
descriptors = []
active,inactive = 0,0
for D in Descriptors._descList:
descriptors.append(D[0])
calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptors)
clf_RF = self.model[model_number]
for sample in self.sd_entries:
use = False
try:
pattern = calculator.CalcDescriptors(sample)
use = True
            except Exception:
                sys.stderr.write("Error computing descriptors for %s, skip\n" % sample)
if use:
dataDescrs_array = np.asarray(pattern)
y_predict = int(clf_RF.predict(dataDescrs_array)[0])
if y_predict==0: inactive += 1
if y_predict==1: active += 1
sample.SetProp("TL_prediction",str(y_predict))
return (active,inactive)
if __name__ == "__main__":
def step_error(step):
sys.stderr.write("Error in Step: %s" % step)
usage = "usage: python master.py [--accession=<Acc_ID>] [--sdf=<sdf-File>] --dupl/--uniq [--rof] [--combine=<file1>,<file2>] [--IC50=<IC50_tag>] [--cutoff=<value>] [--remove_descr=<txt_file>] [--proxy=<https://user:[email protected]:portnumber] [--verbous] [--check_models=<model.pkl>]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--accession',action='store',type='string',dest='accession',help="Accession ID of Protein (hint: P43088 is Vitamin_D_Receptor with ~200 compounds)",default='')
parser.add_option('--rof',action='store_true',dest='onefile',help='remove obsolete Files',default=False)
parser.add_option('--dupl',action='store_true',dest='dupl',help='use only duplicates',default=False)
parser.add_option('--uniq',action='store_true',dest='uniq',help='use only uniques',default=False)
parser.add_option('--combine',action='store',type='string',dest='combine',help='Combine 2 SDF/SDF.GZ Files',default='')
parser.add_option('--IC50',action='store',type='string',dest='SD_tag',help='name of IC50 field, default is \'value\'',default='value')
parser.add_option('--cutoff',action='store',type='int',dest='cutoff',help='cutoff-value for hERG-trafficlight, default is \'5000\'',default=5000)
parser.add_option('--remove_descr',action='store',type='string',dest='remove_descr',help='file with SDtags2remove, line-wise default:<internal list>',default='')
parser.add_option('--proxy',action='store',type='string',dest='proxy',help='Use this Proxy',default='')
parser.add_option('--sdf',action='store',type='string',dest='sdf',help='load this SDF-File',default='')
parser.add_option('--verbous',action='store_true',dest='verbous',help='verbous',default=False)
parser.add_option('--check_models',action='store',type='string',dest='modelfile',help='check compounds with this model',default='')
(options,args) = parser.parse_args()
combineItems = options.combine.split(',')
if len(combineItems) == 1 and len(combineItems[0])>0:
print('need 2 files to combine')
print(usage)
sys.exit(-1)
elif len(combineItems) == 2 and len(combineItems[0])>0 and len(combineItems[1])>0:
cur_file = _04.combine(combineItems[0],combineItems[1])
print("File: %s" % cur_file)
sys.exit(0)
code = options.accession.split(':')
if len(code)==1:
accession = code[0]
else:
accession = code[1]
if options.accession == '' and options.sdf == '':
print("please offer Accession-Number or SDF-File")
print("-h for help")
sys.exit(-1)
if options.dupl==False and options.uniq==False:
print("Please select uniq or dupl -h for help")
print("-h for help")
sys.exit(-1)
pco = p_con(accession,proxy=options.proxy)
pco.verbous = options.verbous
if options.sdf != '':
print("load sdf from File: %s" % options.sdf)
result = pco.load_mols(options.sdf)
if not result:
step_error("load SDF-File")
sys.exit(-1)
else:
print("gather Data for Accession-ID \'%s\'" % accession)
result = pco.step_0_get_chembl_data()
if not result:
step_error("download ChEMBL-Data")
sys.exit(-1)
result = pco.step_1_keeplargestfrag()
if not result:
step_error("keep largest Fragment")
sys.exit(-1)
if options.uniq:
result = pco.step_2_remove_dupl()
if not result:
step_error("remove duplicates")
sys.exit(-1)
result = pco.step_3_merge_IC50()
if not result:
step_error("merge IC50-Values for same Smiles")
sys.exit(-1)
if options.modelfile != '':
result = pco.load_models(options.modelfile.split(","))
if not result:
step_error("Load Model-Files")
sys.exit(-1)
print("\n#Model\tActive\tInactive")
for i in range(len(pco.model)):
act,inact = pco.predict(i)
print("%d\t%d\t%d" % (i,act,inact))
sys.exit(0)
result = pco.step_4_set_TL(options.cutoff)
if not result:
step_error("set Trafficlight for cutoff")
sys.exit(-1)
result = pco.step_5_remove_descriptors()
if not result:
step_error("remove descriptors")
sys.exit(-1)
result = pco.step_6_calc_descriptors()
if not result:
step_error("calculate Descriptors")
sys.exit(-1)
result = pco.step_7_train_models()
if not result:
step_error("Training of Models")
sys.exit(-1)
pco.save_model_info("model_info.csv",mode="csv")
pco.save_model_info("model_info.html",mode="html")
for i in range(len(pco.model)):
filename = "%s_%dnm_model_%d.pkl" % (accession,options.cutoff,i)
pco.save_model(filename,i)
print("Model %d saved into File: %s" % (i,filename))
for i in range(len(pco.model)):
act,inact = pco.predict(i)
print("Model %d active: %d\tinactive: %d" % (i,act,inact))
|
bsd-3-clause
|
SnakeJenny/TensorFlow
|
tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py
|
116
|
5164
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
sweverett/Balrog-GalSim
|
plots/plot_YZ.py
|
1
|
29005
|
### YZ March 29, 2018
### call method: fignames=plot_YZ.make_all(basepath=None, tile_list=None, realizations=None, outdir=None)
## example call: fignames=plot_YZ.make_all(basepath='/data/des71.a/data/kuropat/blank_test/y3v02/balrog_images', tile_list=['DES0347-5540'], realizations=['0', '1'])
### It will make a bunch of plots with names listed in fignames
## this file also contains the following functions: read_files(), match_func(), running_medians(),
## dm_m_plot(), dm_T_plot(), dm_dT_plot()
from __future__ import print_function
from sys import argv
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
from esutil import htm
import sys
import os
from astropy.table import Table, hstack, vstack
def read_files(basepath, tile_list, realizations):
truth_gals=[]
truth_stas=[]
measu_objs=[]
for tile in tile_list:
for realn in realizations:
truth_gal_file=os.path.join(basepath, realn, tile, tile+'_'+realn+'_balrog_truth_cat_gals.fits')
truth_star_file=os.path.join(basepath, realn, tile, tile+'_'+realn+'_balrog_truth_cat_stars.fits')
meas_file=os.path.join(basepath, realn, tile, 'mof', tile+'_mof.fits')
if os.path.isfile(truth_gal_file):
gals_temp=Table.read(truth_gal_file, hdu=1)
if len(gals_temp) > 0:
if len(truth_gals)==0:
truth_gals=gals_temp
else:
truth_gals=vstack([truth_gals, gals_temp])
if os.path.isfile(truth_star_file):
stas_temp=Table.read(truth_star_file, hdu=1)
if len(stas_temp) > 0:
if len(truth_stas)==0:
truth_stas=stas_temp
else:
truth_stas=vstack([truth_stas, stas_temp])
if os.path.isfile(meas_file):
objs_temp=Table.read(meas_file, hdu=1)
if len(objs_temp) > 0:
if len(measu_objs)==0:
measu_objs=objs_temp
else:
measu_objs=vstack([measu_objs, objs_temp])
    print('found:\n %i truth gals,\n %i truth stars,\n %i observed objects' % (len(truth_gals), len(truth_stas), len(measu_objs)))
return truth_gals, truth_stas, measu_objs
def match_func(ra1st, dec1st, ra2st, dec2st, cat1, cat2, comp_dis=0.5):
if len(cat1) > 0 and len(cat2) > 0:
ra1=cat1[ra1st]
dec1=cat1[dec1st]
ra2=cat2[ra2st]
dec2=cat2[dec2st]
cosA=np.cos(np.mean(0)*np.pi/180.0)
cosB=np.cos(np.mean(0)*np.pi/180.0)
A= np.array([ra1*cosA, dec1]).transpose()
B= np.array([ra2*cosB, dec2]).transpose()
tree = cKDTree( B)
dis, inds = tree.query(A , k=1, p=2)
dis=dis*3600.0
indA, =np.where(dis < comp_dis)
indB=inds[indA]
        if len(indA) > 0:
            print(' %i objects matched' % (len(indA)))
            return cat1[indA], cat2[indB]
        else:
            print(' No matches found!')
            return [], []
    else:
        print('Catalog empty!')
        return [], []
def estimate_dist(ra1, dec1, ra2, dec2):
cosA=np.cos(np.mean(0)*np.pi/180.0)
cosB=np.cos(np.mean(0)*np.pi/180.0)
A= np.array([ra1*cosA, dec1]).transpose()
B= np.array([ra2*cosB, dec2]).transpose()
tree = cKDTree( B)
dis, inds = tree.query(A , k=2, p=2)
dis=dis*3600.0
disB=dis[:, 1]
return disB
def running_medians(xx, yy, binsize=1):
bins_lo=np.arange(np.min(xx), np.max(xx), binsize)
bins_up=bins_lo+binsize
yy_med=np.zeros(len(bins_up))
yy_lo=np.zeros(len(bins_up))
yy_hi=np.zeros(len(bins_up))
xx_bin=np.zeros(len(bins_up))-10.0**15.0
for ii in range(len(bins_lo)):
bin_xx_lo=bins_lo[ii]
bin_xx_up=bins_up[ii]
ind, =np.where( (xx > bin_xx_lo)&(xx < bin_xx_up) )
if len(ind) > 0:
yy_med[ii]=np.median(yy[ind])
xx_bin[ii]=np.median(xx[ind])
yy_lo[ii]=np.percentile(yy[ind], 15.9)
yy_hi[ii]=np.percentile(yy[ind], 84.1)
ind=np.where(xx_bin > -10.0**15.0)
xx_bin=xx_bin[ind]
yy_med=yy_med[ind]
yy_lo=yy_lo[ind]
yy_hi=yy_hi[ind]
return xx_bin, yy_med, yy_lo, yy_hi
def dm_m_plot(t_gm, o_gm, t_sm, o_sm, up_perc=1, lo_perc=99, figname=None):
plt.figure(figsize=(9, 9))
if len(t_gm) > 0:
plt.subplot(431)
yy=o_gm['cm_mag'][:, 0]-t_gm['cm_mag'][:, 0]
xx=t_gm['cm_mag'][:, 0]
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.title('Galaxies (cm_mag)', fontsize=10)
plt.xlabel('True cm_mag (g) ', fontsize=10)
plt.ylabel('Obs -True cm_mag (g)', fontsize=8)
plt.subplot(434)
yy=o_gm['cm_mag'][:, 1]-t_gm['cm_mag'][:, 1]
xx=t_gm['cm_mag'][:, 1]
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True cm_mag (r) ', fontsize=10)
plt.ylabel('Obs -True cm_mag (r)', fontsize=8)
plt.subplot(437)
yy=o_gm['cm_mag'][:, 2]-t_gm['cm_mag'][:, 2]
xx=t_gm['cm_mag'][:, 2]
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True cm_mag (i) ', fontsize=10)
plt.ylabel('Obs -True cm_mag (i)', fontsize=8)
plt.subplot(4, 3, 10)
yy=o_gm['cm_mag'][:, 3]-t_gm['cm_mag'][:, 3]
xx=t_gm['cm_mag'][:, 3]
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True cm_mag (z) ', fontsize=10)
plt.ylabel('Obs -True cm_mag (z)', fontsize=8)
if len(t_sm) > 0:
plt.subplot(432)
xx=t_sm['g_Corr']
yy=o_sm['cm_mag'][:, 0]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2, label=None)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--', label='50, 15.9, 84.1 percentiles')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.title('Stars (cm_mag comparison)', fontsize=10)
plt.xlabel('True mag (g) ', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (g)', fontsize=8)
plt.legend(loc=3, fontsize=8)
plt.subplot(435)
xx=t_sm['g_Corr']-t_sm['gr_Corr']
yy=o_sm['cm_mag'][:, 1]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True mag (r) ', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (r)', fontsize=8)
plt.subplot(438)
xx=t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr']
yy=o_sm['cm_mag'][:, 2]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True mag (i) ', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (i)', fontsize=8)
plt.subplot(4, 3, 11)
xx=t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr']-t_sm['iz_Corr']
yy=o_sm['cm_mag'][:, 3]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True mag (z) ', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (z)', fontsize=8)
plt.subplot(433)
xx=t_sm['g_Corr']
yy=o_sm['psf_mag'][:, 0]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.plot([14, 28], [0, 0], 'r:')
plt.title('Stars (psf_mag comparison)', fontsize=10)
plt.xlabel('True mag (g) ', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (g)', fontsize=8)
plt.subplot(436)
xx=t_sm['g_Corr']-t_sm['gr_Corr']
yy=o_sm['psf_mag'][:, 1]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True mag (r) ', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (r)', fontsize=8)
plt.subplot(439)
xx=t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr']
yy=o_sm['psf_mag'][:, 2]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True mag (i) ', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (i)', fontsize=8)
plt.subplot(4, 3, 12)
xx=t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr']-t_sm['iz_Corr']
yy=o_sm['psf_mag'][:, 3]-xx
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([14, 28])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([14, 28], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('True mag (z) ', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (z)', fontsize=8)
plt.tight_layout()
if figname is None:
plt.show()
return []
else:
plt.savefig(figname)
return figname
def dm_T_plot(t_gm, o_gm, t_sm, o_sm, up_perc=1, lo_perc=99, figname=None):
plt.figure(figsize=(9, 9))
if len(t_gm) > 0:
        ind=np.where((o_gm['cm_T'] > 0) & (o_gm['cm_T'] < 10.0**15.0))
t_gm=t_gm[ind]
o_gm=o_gm[ind]
plt.subplot(431)
yy=o_gm['cm_mag'][:, 0]-t_gm['cm_mag'][:, 0]
xx=np.log10(o_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 7])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 7], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy, binsize=1)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs -True cm_mag (g)', fontsize=8)
plt.subplot(434)
yy=o_gm['cm_mag'][:, 1]-t_gm['cm_mag'][:, 1]
xx=np.log10(o_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 7])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 7], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs -True cm_mag (r)', fontsize=8)
plt.subplot(437)
yy=o_gm['cm_mag'][:, 2]-t_gm['cm_mag'][:, 2]
xx=np.log10(o_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 7])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 7], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs -True cm_mag (i)', fontsize=8)
plt.subplot(4, 3, 10)
yy=o_gm['cm_mag'][:, 3]-t_gm['cm_mag'][:, 3]
xx=np.log10(o_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 7])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 7], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs -True cm_mag (z)', fontsize=8)
if len(t_sm) > 0:
        ind=np.where((o_sm['cm_T'] > 0) & (o_sm['cm_T'] < 10.0**15.0))
t_sm=t_sm[ind]
o_sm=o_sm[ind]
plt.subplot(432)
yy=o_sm['cm_mag'][:, 0]-(t_sm['g_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2, label=None)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--', label='50, 15.9, 84.1 percentiles')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.title('Stars (cm_mag comparison)', fontsize=10)
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (g)', fontsize=8)
plt.legend(loc=3, fontsize=8)
plt.subplot(435)
yy=o_sm['cm_mag'][:, 1]-(t_sm['g_Corr']-t_sm['gr_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (r)', fontsize=8)
plt.subplot(438)
yy=o_sm['cm_mag'][:, 2]-(t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (i)', fontsize=8)
plt.subplot(4, 3, 11)
yy=o_sm['cm_mag'][:, 3]-(t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr']-t_sm['iz_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs cm_mag -True mag (z)', fontsize=8)
plt.subplot(433)
yy=o_sm['psf_mag'][:, 0]-(t_sm['g_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.plot([14, 28], [0, 0], 'r:')
plt.title('Stars (psf_mag comparison)', fontsize=10)
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (g)', fontsize=8)
plt.subplot(436)
yy=o_sm['psf_mag'][:, 1]-(t_sm['g_Corr']-t_sm['gr_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (r)', fontsize=8)
plt.subplot(439)
yy=o_sm['psf_mag'][:, 2]-(t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (i)', fontsize=8)
plt.subplot(4, 3, 12)
yy=o_sm['psf_mag'][:, 3]-(t_sm['g_Corr']-t_sm['gr_Corr']-t_sm['ri_Corr']-t_sm['iz_Corr'])
xx=np.log10(o_sm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-3, 5])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-3, 5], [0, 0], 'b:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
plt.xlabel('Obs log10(cm_T)', fontsize=10)
plt.ylabel('Obs psf_mag -True mag (z)', fontsize=8)
plt.tight_layout()
if figname is None:
plt.show()
return []
else:
plt.savefig(figname)
return figname
def dm_dT_plot(t_gm, o_gm, t_sm, o_sm, up_perc=1, lo_perc=99, figname=None):
plt.figure(figsize=(4, 9))
if len(t_gm) > 0:
        ind=np.where((o_gm['cm_T'] > 0) & (o_gm['cm_T'] < 10.0**15.0))
t_gm=t_gm[ind]
o_gm=o_gm[ind]
plt.subplot(411)
yy=o_gm['cm_mag'][:, 0]-t_gm['cm_mag'][:, 0]
xx=(o_gm['cm_T']-t_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-10, 10000])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-1000, 10000], [0, 0], 'b:')
plt.xscale('symlog')
plt.xlabel('cm_T Obs-Truth', fontsize=10)
plt.ylabel('Obs-True cm_mag (g)', fontsize=8)
plt.subplot(412)
yy=o_gm['cm_mag'][:, 1]-t_gm['cm_mag'][:, 1]
xx=(o_gm['cm_T']-t_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-10, 10000])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-1000, 10000], [0, 0], 'b:')
plt.xscale('symlog')
plt.xlabel('cm_T Obs-Truth', fontsize=10)
plt.ylabel('Obs-True cm_mag (r)', fontsize=8)
plt.subplot(413)
yy=o_gm['cm_mag'][:, 2]-t_gm['cm_mag'][:, 2]
xx=(o_gm['cm_T']-t_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-10, 10000])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-1000, 10000], [0, 0], 'b:')
plt.xscale('symlog')
plt.xlabel('cm_T Obs-Truth', fontsize=10)
plt.ylabel('Obs -True cm_mag (i)', fontsize=8)
plt.subplot(414)
yy=o_gm['cm_mag'][:, 3]-t_gm['cm_mag'][:, 3]
xx=(o_gm['cm_T']-t_gm['cm_T'])
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.xlim([-10, 10000])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([-1000, 10000], [0, 0], 'b:')
plt.xscale('symlog')
plt.xlabel('cm_T Obs-Truth', fontsize=10)
plt.ylabel('Obs -True cm_mag (z)', fontsize=8)
plt.tight_layout()
if figname is None:
plt.show()
return []
else:
plt.savefig(figname)
return figname
#------------------------------------------------------------------------------
# Plots added by Spencer
def df_f_plot(t_gf, o_gf, t_sf, o_sf, up_perc=1, lo_perc=99, figname=None):
plt.figure(figsize=(9, 9))
if len(t_gf) > 0:
bands = ['g', 'r', 'i', 'z']
for i in range(4):
plt.subplot(4,3,3*i+1)
yy=o_gf['cm_flux'][:, i]-t_gf['cm_flux'][:, i]
xx=t_gf['cm_flux'][:, i]
ind=np.where((yy>-10**4)&(yy < 10**4)&(o_gf['flags']==0)&(o_gf['cm_T']<100))
xx=xx[ind];yy=yy[ind]
plt.xlim([0, 10**4])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([0, 10**4], [0, 0], 'k:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
ax = plt.gca()
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
if i==0:
plt.title('Galaxies Flux (cm_flux)', fontsize=10)
plt.xlabel('True cm_flux ({})'.format(bands[i]), fontsize=10)
plt.ylabel('Obs -True cm_flux ({})'.format(bands[i]), fontsize=8)
# Chi scatter
plt.subplot(4,3,3*i+2)
yy=(o_gf['cm_flux'][:, i]-t_gf['cm_flux'][:, i]) / np.sqrt(o_gf['cm_flux_cov'][:, i, i])
# Use old xx and ind
yy=yy[ind]
plt.xlim([0, 10**4])
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.plot([0, 10**4], [0, 0], 'k:')
xx_bin, yy_med, yy_lo, yy_hi=running_medians(xx, yy)
ax = plt.gca()
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
plt.plot(xx_bin, yy_med, 'r--')
plt.plot(xx_bin, yy_lo, 'r--')
plt.plot(xx_bin, yy_hi, 'r--')
if i==0:
plt.title('Galaxies Flux Chi (cm_flux / cm_flux_err)', fontsize=10)
            plt.xlabel('True cm_flux ({})'.format(bands[i]), fontsize=10)
plt.ylabel('Obs -True cm_flux chi ({})'.format(bands[i]), fontsize=8)
# Chi scatter histogram
yy=(o_gf['cm_flux'][:, i]-t_gf['cm_flux'][:, i]) / np.sqrt(o_gf['cm_flux_cov'][:, i, i])
xx=t_gf['cm_flux'][:, i]
# Makes histograms more uniform
ind=np.where((yy>-10**2)&(yy < 10**2)&(o_gf['flags']==0)&(o_gf['cm_T']<100))
xx=xx[ind]
yy=yy[ind]
plt.subplot(4,3,3*i+3)
plt.xlim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.hist(yy, histtype='step', ec='k', bins=150)
ax = plt.gca()
ax.axvline(x=0, color='k', linestyle=':', linewidth=2)
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
if i==0:
plt.title('Galaxies Chi Distribution', fontsize=10)
plt.xlabel('Obs -True cm_flux chi ({})'.format(bands[i]), fontsize=8)
plt.ylabel('Counts')
plt.tight_layout()
if figname is None:
plt.show()
return []
else:
plt.savefig(figname)
return figname
def dT_dist_plot(t_gm, o_gm, t_sm, o_sm, oo, up_perc=1, lo_perc=99, figname=None):
plt.figure()
if len(t_gm) > 0:
        ind=np.where((o_gm['cm_T'] > 0) & (o_gm['cm_T'] < 10.0**15.0))
t_gm=t_gm[ind]
o_gm=o_gm[ind]
dists=estimate_dist(t_gm['ra'], t_gm['dec'], oo['ra'], oo['dec'])
xx=o_gm['cm_weight']#-t_gm['cm_TdByTe']
yy=o_gm['cm_T']/t_gm['cm_T']
ind=np.where((yy>-10)&(yy < 10))
xx=xx[ind];yy=yy[ind]
plt.ylim([np.percentile(yy, up_perc), np.percentile(yy, lo_perc)])
plt.scatter(xx, yy, 1, marker='o', alpha=0.2)
plt.xlim([np.percentile(xx, up_perc), np.percentile(xx, lo_perc)])
plt.tight_layout()
if figname is None:
plt.show()
return []
else:
plt.savefig(figname)
return figname
def make_all(basepath=None, tile_list=None, realizations=None, outdir=None):
if basepath is None:
basepath='/data/des71.a/data/kuropat/blank_test/y3v02/balrog_images/'
if realizations is None:
realizations=os.listdir(basepath)
if tile_list is None:
tile_list=os.listdir( os.path.join(basepath, realizations[0]) )
if outdir is None:
outdir = os.getcwd()
    ## Exit if the output directory does not yet exist
    if not os.path.exists(outdir):
        print("output dir doesn't exist. Please make the output dir.")
        sys.exit(0)
##read in files
tg, ts, oo=read_files(basepath, tile_list, realizations)
### doing matching
truth_gm, obs_gm=match_func('ra', 'dec', 'ra', 'dec', tg, oo, comp_dis=0.5)
truth_sm, obs_sm=match_func('RA_new', 'DEC_new', 'ra', 'dec', ts, oo, comp_dis=0.5)
### make plots
names=[]
##Diff_m vs True_m plots
fn1 = os.path.join(outdir, 'dm_m_YZ.png')
names=np.append(names, dm_m_plot(truth_gm, obs_gm, truth_sm, obs_sm, figname=fn1))
##Diff_m vs Obs_T plots
fn2 = os.path.join(outdir, 'dm_T_YZ.png')
names=np.append(names, dm_T_plot(truth_gm, obs_gm, truth_sm, obs_sm, figname=fn2))
##Diff_m vs diff T plots
fn3 = os.path.join(outdir, 'dm_dT_gals_YZ.png')
names=np.append(names, dm_dT_plot(truth_gm, obs_gm, truth_sm, obs_sm, figname=fn3))
##Diff T vs distance to nearest neighbor plots
# commented out. my experiment plot
#fn4 = os.path.join(outdir, 'dT_dist_gals_YZ.png')
#names=np.append(names, dT_dist_plot(truth_gm, obs_gm, truth_sm, obs_sm, oo, figname=fn4))
##Diff_f vs True_f plots
fn4 = os.path.join(outdir, 'df_f_spencer.png')
# names=np.append(names, df_f_plot(truth_gm, obs_gm, truth_sm, obs_sm, up_perc=0, lo_perc=100, figname=fn4))
names=np.append(names, df_f_plot(truth_gm, obs_gm, truth_sm, obs_sm, figname=fn4))
    print('generated plots: ', names)
return names
if __name__ == "__main__":
## We can make this fancier, but for now this is simple enough. Could use argparse instead.
# First argument is basepath
try:
print(argv[1])
basepath = argv[1]
except IndexError:
basepath = None
# Second argument is output directory
try:
print(argv[2])
outdir = argv[2]
except IndexError:
outdir = None
make_all(basepath=basepath, outdir=outdir)
|
mit
|
khkaminska/scikit-learn
|
examples/plot_multilabel.py
|
236
|
4157
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
zero or more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
bsd-3-clause
|
lbishal/scikit-learn
|
examples/feature_selection/plot_select_from_model_boston.py
|
146
|
1527
|
"""
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
|
bsd-3-clause
|
mfjb/scikit-learn
|
sklearn/manifold/isomap.py
|
229
|
7169
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDTree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
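        # (A vectorized equivalent would be roughly
        #  G_X = (self.dist_matrix_[indices] + distances[:, :, None]).min(axis=1),
        #  at the cost of an (n_queries, n_neighbors, n_train) intermediate array.)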
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
|
bsd-3-clause
|
RachitKansal/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
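# Illustrative usage sketch (not part of the original test module): a direct
# call to one of the generators exercised above; every parameter value here is
# an arbitrary example rather than a value taken from the tests.
def _example_make_classification_usage():
    X, y = make_classification(n_samples=200, n_features=20, n_informative=5,
                               n_classes=3, weights=[0.2, 0.3, 0.5],
                               random_state=0)
    # X is a (200, 20) feature matrix; y holds the class labels 0, 1 and 2
    return X.shape, np.bincount(y)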
|
bsd-3-clause
|
sanketloke/scikit-learn
|
benchmarks/bench_plot_neighbors.py
|
101
|
6469
|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
|
bsd-3-clause
|
zehpunktbarron/iOSMAnalyzer
|
scripts/c5_tag_completeness_health_care.py
|
1
|
12360
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#description :This file creates a plot: Calculates the development of the tag-completeness [%] of all "health-care" POIs
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
# Establish the connection to the database using psycopg2
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# SQL queries can be issued through this new "cursor" method
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
--
-- Health care
--
SELECT
generate_series,
-- START Key "name"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_name * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_name,
-- END Key "name"
-- START Key "operator"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_operator * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_operator,
-- END Key "operator"
-- START Key "website"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_website * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_website,
-- END Key "website"
-- START Key "housenumber"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_housenumber * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_housenumber,
-- END Key "housenumber"
-- START Key "phone"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_phone * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_phone,
-- END Key "phone"
-- START Key "wheelchair"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_wheelchair * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_wheelchair
-- END Key "wheelchair"
FROM
(SELECT generate_series,
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'name'
) AS cnt_name,
-- START operator
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'operator'
) AS cnt_operator,
-- END operator
-- START website
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'website'
) AS cnt_website,
-- END website
-- START housenumber
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'addr:housenumber'
) AS cnt_housenumber,
-- END housenumber
-- START phone
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'phone'
) AS cnt_phone,
-- END phone
-- START wheelchair
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'wheelchair'
) AS cnt_wheelchair,
-- END wheelchair
-- START total
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Health care
(
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
) AS cnt_total
-- END total
FROM generate_series(
(SELECT date_trunc ('month',(
SELECT MIN(valid_from) FROM hist_plp)) as foo), -- Select minimum date (month)
(SELECT MAX(valid_from) FROM hist_plp)::date, -- Select maximum date
interval '1 month')
) AS foo2
;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples = []
for row in cur:
data_tuples.append(row)
except:
print "Query could not be executed"
###
### Plot (Multiline-Chart)
###
# Datatypes of the returned data
datatypes = [('date', 'S20'),('col2', 'double'), ('col3', 'double'), ('col4', 'double'), ('col5', 'double'), ('col6', 'double'), ('col7', 'double')]
# Data-tuple and datatype
data = np.array(data_tuples, dtype=datatypes)
# Dates are taken from the 'date' column
col2 = data['col2']
col3 = data['col3']
col4 = data['col4']
col5 = data['col5']
col6 = data['col6']
col7 = data['col7']
# Converts date to a manageable date-format for matplotlib
dates = mdates.num2date(mdates.datestr2num(data['date']))
fig, ax = plt.subplots()
# set figure size
fig.set_size_inches(12,8)
# Create linechart
plt.plot(dates, col2, color = '#2dd700', linewidth=2, label='name = *')
#plt.plot(dates, col3, color = '#00a287', linewidth=2, label='opening_hours = *')
plt.plot(dates, col3, color = '#ff6700', linewidth=2, linestyle='dashed', label='operator = *')
plt.plot(dates, col4, color = '#ff6700', linewidth=2, label='website = *')
plt.plot(dates, col5, color = '#f5001d', linewidth=2, label='addr:housenumber = *')
plt.plot(dates, col6, color = '#2dd700', linewidth=2, linestyle='dashed', label='phone = *')
plt.plot(dates, col7, color = '#00a287', linewidth=2, linestyle='dashed', label='wheelchair = *')
# Forces the plot to start from 0 and end at 100
pylab.ylim([0,100])
# Place a gray dashed grid behind the ticks (only for the y-axis)
ax.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the ticks
ax.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Date')
plt.ylabel('Tag-Completeness [%]')
# Locate legend on the plot (http://matplotlib.org/users/legend_guide.html#legend-location)
# Shrink the current axis by 10% in width and height
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height * 0.9])
# Put a legend to the right of the current axis and reduce the font size
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':9})
# Plot-title
plt.title('Development of the Tag-Completeness of all Health-Care POIs')
# Save plot to *.jpeg-file
plt.savefig('pics/c5_tag_completeness_health_care.jpeg')
plt.clf()
|
gpl-3.0
|
kernc/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
25
|
25114
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(y, order='F', dtype='float64', ensure_2d=False)
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in incorrect
# computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
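def _example_warm_start_path():
    # Illustrative sketch (not part of the original test module): the warm-start
    # pattern that test_warm_start_convergence_with_regularizer_decrement relies
    # on; the alpha values below are arbitrary examples.
    X, y, _, _ = build_dataset()
    model = ElasticNet(warm_start=True)
    coefs = []
    for alpha in (1e-1, 1e-2, 1e-3):  # decreasing regularization strength
        model.set_params(alpha=alpha)
        model.fit(X, y)  # each fit starts from the previous solution's coef_
        coefs.append(model.coef_.copy())
    return coefs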
|
bsd-3-clause
|
xyguo/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
31
|
4757
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computationally heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
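def _example_cached_parallel():
    # Illustrative sketch (not part of the original module): combining Memory
    # and Parallel as described in the docstring above; the cache directory
    # below is an arbitrary assumption.
    import numpy as np
    mem = Memory(cachedir='/tmp/joblib_demo')
    cached_square = mem.cache(np.square)
    # each distinct input is computed once, then served from the on-disk cache;
    # Parallel dispatches the calls (a single worker is used here)
    return Parallel(n_jobs=1)(delayed(cached_square)(np.arange(i + 1))
                              for i in range(3))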
|
bsd-3-clause
|
themiwi/pdfFoam
|
tutorials/AutoIgnitionBurner/tools/plot_ydot.py
|
1
|
2284
|
#!/usr/bin/env python
import matplotlib as mp
import matplotlib.pyplot as plt
from matplotlib.texmanager import TexManager
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
mp.rc('text', usetex=True)
mp.rcParams['font.sans-serif'] = 'computer modern bright'
TexManager.font_info['computer modern bright'] = (
('cmbr', r'\usepackage{cmbright}'))
mp.rcParams['text.latex.preamble'] = (
r'\PassOptionsToPackage{dvips}{graphicx}',
r'\usepackage{cmbright}',
r'\usepackage{units}',
r'\usepackage{rotating}',
)
mp.rc('font', family='sans-serif', size=10)
mp.rc('axes', titlesize='small', labelsize='x-small')
mp.rc('legend', fontsize='x-small')
mp.rc('xtick', labelsize='x-small')
mp.rc('ytick', labelsize='x-small')
CMRMap = mp.colors.LinearSegmentedColormap(
'CMR',
{
'blue': ((0.0, 0.0, 0.0),
(0.125, 0.5, 0.5),
(0.25, 0.75, 0.75),
(0.375, 0.5, 0.5),
(0.5, 0.15, 0.15),
(0.625, 0.0, 0.0),
(0.75, 0.1, 0.1),
(0.875, 0.5, 0.5),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.125, 0.15, 0.15),
(0.25, 0.15, 0.15),
(0.375, 0.2, 0.2),
(0.5, 0.25, 0.25),
(0.625, 0.5, 0.5),
(0.75, 0.75, 0.75),
(0.875, 0.9, 0.9),
(1.0, 1.0, 1.0)),
'red': ((0.0, 0.0, 0.0),
(0.125, 0.15, 0.15),
(0.25, 0.3, 0.3),
(0.375, 0.6, 0.6),
(0.5, 1.0, 1.0),
(0.625, 0.9, 0.9),
(0.75, 0.9, 0.9),
(0.875, 0.9, 0.9),
(1.0, 1.0, 1.0))}
)
data = np.loadtxt('tmp.dat')
npv = sum(data[:,2]==data[0,2])
data = data.reshape((npv, -1, data.shape[1]))
Z = data[:, :, 0]
PV = data[:, :, 2]
Ydot = data[:, :, 4]
fig = plt.gcf()
fig.set_size_inches(4, 3)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(Z, PV, Ydot, rstride=1, cstride=1, cmap=cm.jet, #CMRMap,
linewidth=0, antialiased=False, shade='interp')
ax.set_xlabel(r'$z\ \left[-\right]$')
ax.set_ylabel(r'PV\ $\left[-\right]$')
ax.set_zlabel(r'\turnbox{180}{$\dot{\omega}_c\ \left[\unitfrac{1}{s}\right]$}')
#ax.set_zlabel(r'$\dot{\omega}_c\ \left[\unitfrac{1}{s}\right]$')
ax.view_init(azim=220)
#fig.savefig('Ydot.eps')
fig.savefig('Ydot.png', dpi=600)
#plt.show(block=True)
|
gpl-2.0
|
makeyourowntextminingtoolkit/makeyourowntextminingtoolkit
|
text_mining_toolkit/index_cooccurrence.py
|
1
|
5666
|
# module for indexing a corpus for co-occurrence of words
import glob
import os
import collections
import pandas
import math
# max columns when printing .. (may not be needed if auto detected from display)
pandas.set_option('max_columns', 5)
# delete matrix
def delete_matrix(content_directory):
cooccurrence_matrix_file = content_directory + "matrix_cooccurrence.hdf5"
if os.path.isfile(cooccurrence_matrix_file):
os.remove(cooccurrence_matrix_file)
print("removed co-occurrence matrix file: ", cooccurrence_matrix_file)
pass
pass
# print existing matrix
def print_matrix(content_directory):
# open matrix file
cooccurrence_matrix_file = content_directory + "matrix_cooccurrence.hdf5"
hd5_store = pandas.HDFStore(cooccurrence_matrix_file, mode='r')
cooccurrence_matrix = hd5_store['corpus_matrix']
hd5_store.close()
# print first 10 entries
print("cooccurrence_matrix_file ", cooccurrence_matrix_file)
print(cooccurrence_matrix.head(10))
pass
# create word cooccurrence matrix just for one document, updated to extend beyond immediate neighbour
def create_cooccurrence_matrix_for_document(content_directory, document_name, doc_words_list, window):
# start with empty matrix
cooccurrence_matrix = pandas.DataFrame()
# work along window
for ci in range(1, window + 1):
# first create word-pair list
word_pair_list = zip(doc_words_list[:-ci], doc_words_list[ci:])
# counts for each pair
word_pair_ctr = collections.Counter(word_pair_list)
for wp, c in word_pair_ctr.items():
neighbour_factor = math.exp(- math.pow(ci / window,2))
# this try-except is ugly but needed, because pandas doesn't yet have df[wp] += ...
try:
cooccurrence_matrix.ix[wp] += (c * neighbour_factor)
except KeyError:
cooccurrence_matrix.ix[wp] = (c * neighbour_factor)
pass
# replaces any created NaNs with zeros
cooccurrence_matrix.fillna(0, inplace=True)
pass
pass
# finally save matrix
cooccurrence_matrix_file = content_directory + document_name + "_matrix.cooccurrence"
hd5_store = pandas.HDFStore(cooccurrence_matrix_file, mode='w')
hd5_store['doc_matrix'] = cooccurrence_matrix
hd5_store.close()
pass
# merge document matrices into a single matrix for the corpus
def merge_cooccurrence_matrices_for_corpus(content_directory):
# start with empty matrix
cooccurrence_matrix = pandas.DataFrame()
# list of text files
list_of_matrix_files = glob.glob(content_directory + "*_matrix.cooccurrence")
# load each matrix file and merge into the accumulating corpus matrix
for document_matrix_file in list_of_matrix_files:
print(document_matrix_file)
hd5_store = pandas.HDFStore(document_matrix_file, mode='r')
temporary_document_matrix = hd5_store['doc_matrix']
hd5_store.close()
cooccurrence_matrix = cooccurrence_matrix.add(temporary_document_matrix, fill_value=0)
# remove the per-document matrix file after merging
os.remove(document_matrix_file)
pass
# replace NaN with zeros
cooccurrence_matrix.fillna(0, inplace=True)
# finally save matrix
corpus_matrix_file = content_directory + "matrix_cooccurrence.hdf5"
print("saving corpus co-occurrence matrix ... ", corpus_matrix_file)
hd5_store = pandas.HDFStore(corpus_matrix_file, mode='w')
hd5_store['corpus_matrix'] = cooccurrence_matrix
hd5_store.close()
pass
# query co-occurrence matrix
def query_cooccurance_matrix(content_directory, word1, word2):
# open matrix file
cooccurrence_matrix_file = content_directory + "matrix_cooccurrence.hdf5"
hd5_store1 = pandas.HDFStore(cooccurrence_matrix_file, mode='r')
cooccurrence_matrix = hd5_store1['corpus_matrix']
hd5_store1.close()
# query matrix and return
return cooccurrence_matrix.ix[word1, word2]
# query co-occurrence matrix
def most_likely_next(content_directory, word1):
# open matrix file
cooccurrence_matrix_file = content_directory + "matrix_cooccurrence.hdf5"
hd5_store1 = pandas.HDFStore(cooccurrence_matrix_file, mode='r')
cooccurrence_matrix = hd5_store1['corpus_matrix']
hd5_store1.close()
# query matrix and return index with max cooccurrence value
return cooccurrence_matrix.loc[word1].idxmax()
# get word pairs ordered by co-occurrence (across all documents)
def get_word_pairs_by_cooccurrence(content_directory):
# open matrix file
cooccurrence_matrix_file = content_directory + "matrix_cooccurrence.hdf5"
hd5_store1 = pandas.HDFStore(cooccurrence_matrix_file, mode='r')
cooccurrence_matrix = hd5_store1['corpus_matrix']
hd5_store1.close()
# to find max we need to unstack (unpack 2d matrix into 1d list)
unstacked_cooccurrence_matrix = cooccurrence_matrix.T.unstack()
# remove the zero occurrences
unstacked_cooccurrence_matrix = unstacked_cooccurrence_matrix[unstacked_cooccurrence_matrix>0]
# sort by co-occurrence value
unstacked_cooccurrence_matrix.sort_values(ascending=False, inplace=True)
# convert to pandas dataframe with word1, word2, weight columns
word1_word2_weight_list = [ (w1, w2, unstacked_cooccurrence_matrix.ix[w1,w2]) for (w1,w2) in unstacked_cooccurrence_matrix.index.values]
word1_word2_weight = pandas.DataFrame(word1_word2_weight_list, columns=["word1", "word2", "weight"])
# normalise weight to 0-1
word1_word2_weight['weight'] /= word1_word2_weight['weight'].max()
# return dataframe
return word1_word2_weight
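# --- Hedged usage sketch (not part of the original script); assumes the corpus
# matrix has already been built and merged by the functions above, and that
# "./corpus/" and the query words are hypothetical placeholders.
if __name__ == "__main__":
    content_directory = "./corpus/"
    # co-occurrence weight for a specific word pair
    print(query_cooccurance_matrix(content_directory, "rain", "cloud"))
    # word with the highest co-occurrence with a given word
    print(most_likely_next(content_directory, "rain"))
    # word pairs across the corpus, weights normalised to 0-1
    print(get_word_pairs_by_cooccurrence(content_directory).head())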
|
gpl-2.0
|
gfyoung/pandas
|
pandas/tests/reshape/concat/test_append_common.py
|
2
|
28180
|
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series
import pandas._testing as tm
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
def setup_method(self, method):
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
self.data = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
if label == "bool":
assert obj.dtype == "object"
else:
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self):
# to confirm test case covers intended dtypes
for typ, vals in self.data.items():
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self):
# GH 13660
for typ1, vals1 in self.data.items():
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1).append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1).append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1.append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1.append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1).append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1).append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self):
# GH 13660
for typ1, vals1 in self.data.items():
for typ2, vals2 in self.data.items():
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
continue
elif typ1 == "category" or typ2 == "category":
# TODO: suspicious
continue
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series.append
res = Series(vals1).append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1).append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts.append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1.append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1.append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1.append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
# tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1.append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1.append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1.append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1.append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds.append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = Series([2, 1, 2, 1, 2, np.nan], dtype="object")
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1])
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# completely different categories => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([1, 3, 2])
exp = Series([10, 11, np.nan, 1, 3, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = Series([1, 3, 2, 10, 11, np.nan], dtype="object")
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series(["a", "b", "c"])
exp = Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = Series([10, 11], dtype="category")
s2 = Series([np.nan, np.nan, np.nan])
exp = Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
s3 = Series([1, 2, 1, 2, np.nan])
exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([1, 3, 4])
exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([10, 11, 12])
exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
s1 = Series([1, 3], dtype="category")
s2 = Series([3, 4], dtype="category")
s3 = Series([2, 3])
s4 = Series([2, 2], dtype="category")
s5 = Series([1, np.nan])
s6 = Series([1, 3, 2], dtype="category")
# mixed dtype, values are all in categories => not-category
exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s1.append([s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s6.append([s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
def test_concat_categorical_ordered(self):
# GH 13524
s1 = Series(Categorical([1, 2, np.nan], ordered=True))
s2 = Series(Categorical([2, 1, 2], ordered=True))
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s1], ignore_index=True), exp)
def test_concat_categorical_coercion_nan(self):
# GH 13524
# some edge cases
# category + not-category => not category
s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
s2 = Series([np.nan, 1])
exp = Series([np.nan, np.nan, np.nan, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
s1 = Series([1, np.nan], dtype="category")
s2 = Series([np.nan, np.nan])
exp = Series([1, np.nan, np.nan, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# mixed dtype, all nan-likes => not-category
s1 = Series([np.nan, np.nan], dtype="category")
s2 = Series([np.nan, np.nan])
exp = Series([np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all category nan-likes => category
s1 = Series([np.nan, np.nan], dtype="category")
s2 = Series([np.nan, np.nan], dtype="category")
exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
def test_concat_categorical_empty(self):
# GH 13524
s1 = Series([], dtype="category")
s2 = Series([1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
s1 = Series([], dtype="category")
s2 = Series([], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
s1 = Series([], dtype="category")
s2 = Series([], dtype="object")
# different dtype => not-category
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
s1 = Series([], dtype="category")
s2 = Series([np.nan, np.nan])
# empty Series is ignored
exp = Series([np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
|
bsd-3-clause
|
TomSkelly/MatchAnnot
|
mapAnnot.py
|
1
|
2564
|
#!/usr/bin/env python
# Read annotation file, print counts, plot transcript sizes;
import os
import sys
import optparse
import re # regular expressions
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import Annotations as anno
VERSION = '20140930.01'
DEF_OUTPUT = 'transcript_sizes.png'
DEF_XMAX = 4000
DEF_TITLE = 'Transcript lengths in annotation file'
SIZE_BINS = (1, 3, 5, 8, 9999999)
SIZE_COLORS = ('orange', 'red', 'yellow', 'green', 'blue')
SIZE_LEGENDS = ('1 exon', '2/3 exons', '4/5 exons', '6/7/8 exons', '>8 exons')
def main ():
opt, args = getParms()
gtf = args[0]
annotList = anno.AnnotationList (gtf)
tranSizes = list()
for ix in xrange(len(SIZE_BINS)):
tranSizes.append (list())
for chr in annotList.chromosomes():
for strand in annotList.strands(chr):
for geneEnt in annotList.geneList(chr, strand).getChildren():
for tranEnt in geneEnt.getChildren():
if tranEnt.length > 100000:
print '%-5s %s %2d %5d %s' % (chr, strand, tranEnt.numChildren(), tranEnt.length, tranEnt.name)
for ix, size in enumerate(SIZE_BINS):
if tranEnt.numChildren() <= size:
tranSizes[ix].append (tranEnt.length)
break
plt.figure (figsize=(12, 6))
counts, bins, patches = plt.hist(tranSizes, bins=80, range=(0,opt.xmax), rwidth=0.8, color=SIZE_COLORS, histtype='barstacked', label=SIZE_LEGENDS)
plt.legend(loc='best', prop={'size':10})
plt.xlabel('transcript length')
plt.ylabel('number of transcripts')
if opt.title is not None:
plt.suptitle(opt.title)
plt.savefig (opt.output)
plt.close()
print counts
print bins
print patches
def getParms (): # use default input sys.argv[1:]
    parser = optparse.OptionParser(usage='%prog [options] <gtf_file> ... ')
parser.add_option ('--output', help='Output file name (def: %default)')
parser.add_option ('--xmax', type='int', help='Maximum transcript length (x-axis) to plot (def: %default)')
parser.add_option ('--title', help='Title for top of figure (def: %default)')
parser.set_defaults (output=DEF_OUTPUT,
xmax=DEF_XMAX,
title=DEF_TITLE,
)
opt, args = parser.parse_args()
return opt, args
if __name__ == "__main__":
main()
|
gpl-3.0
|
elijah513/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
205
|
5878
|
import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
|
bsd-3-clause
|
bundgus/python-playground
|
matplotlib-playground/examples/pylab_examples/contour_image.py
|
2
|
3493
|
#!/usr/bin/env python
'''
Test combinations of contouring, filled contouring, and image plotting.
For contour labelling, see contour_demo.py.
The emphasis in this demo is on showing how to make contours register
correctly on images, and on how to get both of them oriented as
desired. In particular, note the usage of the "origin" and "extent"
keyword arguments to imshow and contour.
'''
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import mlab, cm
# Default delta is large because that makes it fast, and it illustrates
# the correct registration between image and contours.
delta = 0.5
extent = (-3, 4, -4, 3)
x = np.arange(-3.0, 4.001, delta)
y = np.arange(-4.0, 3.001, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = (Z1 - Z2) * 10
levels = np.arange(-2.0, 1.601, 0.4) # Boost the upper limit to avoid truncation errors.
norm = cm.colors.Normalize(vmax=abs(Z).max(), vmin=-abs(Z).max())
cmap = cm.PRGn
plt.figure()
plt.subplot(2, 2, 1)
cset1 = plt.contourf(X, Y, Z, levels,
cmap=cm.get_cmap(cmap, len(levels) - 1),
norm=norm,
)
# It is not necessary, but for the colormap, we need only the
# number of levels minus 1. To avoid discretization error, use
# either this number or a large number such as the default (256).
# If we want lines as well as filled regions, we need to call
# contour separately; don't try to change the edgecolor or edgewidth
# of the polygons in the collections returned by contourf.
# Use levels output from previous call to guarantee they are the same.
cset2 = plt.contour(X, Y, Z, cset1.levels,
colors='k',
hold='on')
# We don't really need dashed contour lines to indicate negative
# regions, so let's turn them off.
for c in cset2.collections:
c.set_linestyle('solid')
# It is easier here to make a separate call to contour than
# to set up an array of colors and linewidths.
# We are making a thick green line as a zero contour.
# Specify the zero level as a tuple with only 0 in it.
cset3 = plt.contour(X, Y, Z, (0,),
colors='g',
linewidths=2,
hold='on')
plt.title('Filled contours')
plt.colorbar(cset1)
#hot()
plt.subplot(2, 2, 2)
plt.imshow(Z, extent=extent, cmap=cmap, norm=norm)
v = plt.axis()
plt.contour(Z, levels, hold='on', colors='k',
origin='upper', extent=extent)
plt.axis(v)
plt.title("Image, origin 'upper'")
plt.subplot(2, 2, 3)
plt.imshow(Z, origin='lower', extent=extent, cmap=cmap, norm=norm)
v = plt.axis()
plt.contour(Z, levels, hold='on', colors='k',
origin='lower', extent=extent)
plt.axis(v)
plt.title("Image, origin 'lower'")
plt.subplot(2, 2, 4)
# We will use the interpolation "nearest" here to show the actual
# image pixels.
# Note that the contour lines don't extend to the edge of the box.
# This is intentional. The Z values are defined at the center of each
# image pixel (each color block on the following subplot), so the
# domain that is contoured does not extend beyond these pixel centers.
im = plt.imshow(Z, interpolation='nearest', extent=extent, cmap=cmap, norm=norm)
v = plt.axis()
plt.contour(Z, levels, hold='on', colors='k',
origin='image', extent=extent)
plt.axis(v)
ylim = plt.get(plt.gca(), 'ylim')
plt.setp(plt.gca(), ylim=ylim[::-1])
plt.title("Image, origin from rc, reversed y-axis")
plt.colorbar(im)
plt.show()
|
mit
|
ysasaki6023/NeuralNetworkStudy
|
cifar02/Output/Ksize211_L5_1/train.py
|
24
|
10142
|
#!/usr/bin/env python
import argparse
import time
import numpy as np
import six
import os
import shutil
import chainer
from chainer import computational_graph
from chainer import cuda
import chainer.links as L
import chainer.functions as F
from chainer import optimizers
from chainer import serializers
from chainer.utils import conv
import cPickle as pickle
import matplotlib.pyplot as plt
import matplotlib.cm
class ImageProcessNetwork(chainer.Chain):
def __init__(self,
I_colors, I_Xunit, I_Yunit, F_unit,
N_PLayers = 5,
P0C_feature = 3,
P1C_feature = 3,
P2C_feature = 3,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 2,
P2P_ksize = 2,
L1_dropout = 0.5,
L2_dropout = 0.5,
L2_unit = 256,
):
super(ImageProcessNetwork, self).__init__()
self.IsTrain = True
self.NPLayers = N_PLayers
self.NFeatures = [I_colors]
self.NFilter = [1]
self.NKsize = [1]
self.NImgPix = [(I_Xunit,I_Yunit)]
self.L1_dropout = L1_dropout
self.L2_dropout = L2_dropout
self.L2_unit = L2_unit
for iL in range(self.NPLayers):
## Set Variables
self.NFeatures.append(self.gradualVariable(iL,self.NPLayers,P0C_feature,P1C_feature,P2C_feature))
self.NFilter.append( self.gradualVariable(iL,self.NPLayers,P0C_filter ,P1C_filter ,P2C_filter ))
self.NKsize.append( self.gradualVariable(iL,self.NPLayers,P0P_ksize ,P1P_ksize ,P2P_ksize ))
## Update layers
self.NImgPix.append(
( conv.get_conv_outsize( self.NImgPix[-1][0], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True),
conv.get_conv_outsize( self.NImgPix[-1][1], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True)))
self.add_link("P%d"%iL,L.Convolution2D( self.NFeatures[-2], self.NFeatures[-1],
self.NFilter[-1] , pad=int(self.NFilter[-1]/2.)))
self.add_link("L1",L.Linear( self.NImgPix[-1][0] * self.NImgPix[-1][1] * self.NFeatures[-1] , L2_unit))
self.add_link("L2",L.Linear( L2_unit, F_unit))
return
def gradualVariable(self, cLayer, tLayer, val0, val1, val2):
pos = 0.5
if cLayer <= int(pos*tLayer): v0, v1, p0, p1, pc = val0, val1, 0, int(pos*tLayer), int( cLayer - 0 )
else : v0, v1, p0, p1, pc = val1, val2, int(pos*tLayer), tLayer-1, int( cLayer - int(pos*tLayer))
return int(float(v0) + (float(v1)-float(v0))/(float(p1)-float(p0))*float(pc))
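    # Hedged worked example (not part of the original code): with N_PLayers = 5 and
    # P0C_feature, P1C_feature, P2C_feature = 32, 16, 16, gradualVariable interpolates
    # linearly from 32 down to 16 over the first half of the layers and then stays at
    # 16, so layers 0..4 receive 32, 24, 16, 16, 16 feature maps respectively.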
def setTrainMode(self, IsTrain):
self.IsTrain = IsTrain
return
def __call__(self, x):
h = x
for iL in range(self.NPLayers):
h = self.__dict__["P%d"%iL](h)
h = F.local_response_normalization(h)
h = F.max_pooling_2d(F.relu(h), ksize=self.NKsize[iL+1], cover_all=True)
h = F.dropout(F.relu(self.L1(h)),ratio=self.L1_dropout,train=self.IsTrain)
h = F.dropout(F.relu(self.L2(h)),ratio=self.L2_dropout,train=self.IsTrain)
y = h
return y
def CifarAnalysis(folderName=None,n_epoch=1,batchsize = 1000, **kwd):
id_gpu = 0
OutStr = ""
OutStr += 'GPU: {}\n'.format(id_gpu)
OutStr += 'Minibatch-size: {}\n'.format(batchsize)
OutStr += 'epoch: {}\n'.format(n_epoch)
OutStr += 'kwd: {}\n'.format(kwd)
OutStr += ''
print OutStr
fOutput = None
fInfo = None
if folderName:
if not os.path.exists(folderName):
os.makedirs(folderName)
fOutput = open(os.path.join(folderName,"output.dat"),"w")
fInfo = open(os.path.join(folderName,"info.dat"),"w")
shutil.copyfile(__file__,os.path.join(folderName,os.path.basename(__file__)))
if fInfo: fInfo.write(OutStr)
# Prepare dataset
InDataBatch = []
data_tr = np.zeros((50000,3*32*32),dtype=np.float32)
data_ev = np.zeros((10000,3*32*32),dtype=np.float32)
label_tr = np.zeros((50000),dtype=np.int32)
label_ev = np.zeros((10000),dtype=np.int32)
for i in range(1,5+1):
with open("data_cifar10/data_batch_%d"%i,"r") as f:
tmp = pickle.load(f)
data_tr [(i-1)*10000:i*10000] = tmp["data"]
label_tr[(i-1)*10000:i*10000] = tmp["labels"]
with open("data_cifar10/test_batch","r") as f:
tmp = pickle.load(f)
data_ev [:] = tmp["data"]
label_ev [:] = tmp["labels"]
## Prep
print "Normalizing data ..."
def Normalize(x):
avg = np.average(x,axis=1).reshape((len(x),1))
std = np.sqrt(np.sum(x*x,axis=1) - np.sum(x,axis=1)).reshape((len(x),1))
y = (x - avg) / std
return y
data_tr = Normalize(data_tr)
data_ev = Normalize(data_ev)
x_tr = data_tr.reshape((len(data_tr),3,32,32))
x_ev = data_ev.reshape((len(data_ev),3,32,32))
y_tr = label_tr
y_ev = label_ev
N_tr = len(data_tr) # 50000
N_ev = len(data_ev) # 10000
    ## Define analysis
Resume = None
if "Resume" in kwd:
Resume = kwd["Resume"]
del kwd["Resume"]
model = L.Classifier(ImageProcessNetwork(I_colors=3, I_Xunit=32, I_Yunit=32, F_unit = 10, **kwd))
if id_gpu >= 0:
cuda.get_device(id_gpu).use()
model.to_gpu()
xp = np if id_gpu < 0 else cuda.cupy
# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)
# Init/Resume
if Resume:
print('Load optimizer state from', Resume)
serializers.load_hdf5(Resume+".state", optimizer)
serializers.load_hdf5(Resume+".model", model)
# Learning loop
if fOutput: fOutput.write("epoch,mode,loss,accuracy\n")
for epoch in six.moves.range(1, n_epoch + 1):
print 'epoch %d'%epoch
# training
perm = np.random.permutation(N_tr)
sum_accuracy = 0
sum_loss = 0
start = time.time()
for i in six.moves.range(0, N_tr, batchsize):
x = chainer.Variable(xp.asarray(x_tr[perm[i:i + batchsize]]))
t = chainer.Variable(xp.asarray(y_tr[perm[i:i + batchsize]]))
# Pass the loss function (Classifier defines it) and its arguments
model.predictor.setTrainMode(True)
optimizer.update(model, x, t)
if (epoch == 1 and i == 0) and folderName:
with open(os.path.join(folderName,'graph.dot'), 'w') as o:
g = computational_graph.build_computational_graph(
(model.loss, ))
o.write(g.dump())
print 'graph generated'
sum_loss += float(model.loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
end = time.time()
elapsed_time = end - start
throughput = N_tr / elapsed_time
print 'train mean loss=%.3f, accuracy=%.1f%%, throughput=%.0f images/sec'%(sum_loss / N_tr, sum_accuracy / N_tr * 100., throughput)
if fOutput: fOutput.write("%d,Train,%e,%e\n"%(epoch,sum_loss/N_tr,sum_accuracy/N_tr))
# evaluation
perm = np.random.permutation(N_ev)
sum_accuracy = 0
sum_loss = 0
for i in six.moves.range(0, N_ev, batchsize):
x = chainer.Variable(xp.asarray(x_ev[perm[i:i + batchsize]]),volatile='on')
t = chainer.Variable(xp.asarray(y_ev[perm[i:i + batchsize]]),volatile='on')
model.predictor.setTrainMode(False)
loss = model(x, t)
sum_loss += float(loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
print 'test mean loss=%.3f, accuracy=%.1f%%'%(sum_loss / N_ev, sum_accuracy / N_ev * 100, )
if fOutput: fOutput.write("%d,Test,%e,%e\n"%(epoch,sum_loss/N_ev,sum_accuracy/N_ev))
if folderName and (epoch%10 == 0 or epoch==n_epoch):
# Save the model and the optimizer
if epoch == n_epoch:
myFname = os.path.join(folderName,'mlp_final')
else:
                myFname = os.path.join(folderName,'mlp_%d'%epoch)  # intermediate checkpoint named by the current epoch
#print 'save the model'
serializers.save_hdf5(myFname+".model", model)
serializers.save_hdf5(myFname+".state", optimizer)
if fOutput: fOutput.close()
if fInfo : fInfo.close()
if __name__=="__main__":
n_epoch = 200
for i in range(10):
for l in [6,5,4,3]:
CifarAnalysis("Output/Ksize222_L%d_%d"%(l,i),
n_epoch=n_epoch,
batchsize = 1000,
N_PLayers = l,
P0C_feature = 32,
P1C_feature = 16,
P2C_feature = 16,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 2,
P2P_ksize = 2,
L1_dropout = 0.5,
L2_dropout = 0.0,
L2_unit = 100)
CifarAnalysis("Output/Ksize211_L%d_%d"%(l,i),
n_epoch=n_epoch,
batchsize = 1000,
N_PLayers = l,
P0C_feature = 32,
P1C_feature = 16,
P2C_feature = 16,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 1,
P2P_ksize = 1,
L1_dropout = 0.5,
L2_dropout = 0.0,
L2_unit = 100)
|
mit
|
kipoi/kipoiseq
|
setup.py
|
1
|
1505
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
requirements = [
"kipoi>=0.5.5",
# "genomelake",
"pyfaidx",
"numpy",
"pandas",
"tqdm",
# "colorlog",
# "related>=0.6.0",
# sometimes required
# "h5py",
"gffutils",
"kipoi-utils>=0.1.1",
"kipoi-conda>=0.1.0",
"pyranges"
]
test_requirements = [
"bumpversion",
"wheel",
"epc",
"jedi",
"pytest>=3.3.1",
"pytest-xdist", # running tests in parallel
"pytest-pep8", # see https://github.com/kipoi/kipoi/issues/91
"pytest-mock",
"pytest-cov",
"coveralls",
"scikit-learn",
"cython",
"cyvcf2",
"pyranges>=0.0.71",
# "genomelake",
"keras",
"tensorflow",
"pybedtools",
"concise"
]
setup(
name='kipoiseq',
version='0.6.0',
description="kipoiseq: sequence-based data-loaders for Kipoi",
author="Kipoi team",
author_email='[email protected]',
url='https://github.com/kipoi/kipoiseq',
long_description="kipoiseq: sequence-based data-loaders for Kipoi",
packages=find_packages(),
install_requires=requirements,
extras_require={
"develop": test_requirements,
},
license="MIT license",
zip_safe=False,
keywords=["model zoo", "deep learning",
"computational biology", "bioinformatics", "genomics"],
test_suite='tests',
include_package_data=False,
tests_require=test_requirements,
python_requires='>=3.6'
)
|
mit
|
ViennaRNA/forgi
|
forgi/threedee/model/_ensemble.py
|
1
|
29109
|
#!/usr/bin/python
"""
This code is still experimental
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip, object) # future package
from collections import Mapping, defaultdict, Sequence
import itertools as it
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.manifold import MDS
from sklearn.decomposition import PCA
import time
import forgi.threedee.model.descriptors as ftmd
import forgi.threedee.utilities.vector as ftuv
import scipy.stats
import matplotlib.pyplot as plt
import warnings
from scipy.sparse import lil_matrix
import forgi.threedee.model.similarity as ftms
import pandas as pd
import logging
log = logging.getLogger(__name__)
np_nans = lambda *args, **kwargs: np.ones(*args, **kwargs) * np.nan
class RMSDMatrix(Sequence):
def __init__(self, shape):
self._matrix = lil_matrix(shape, dtype=float)
self._len = shape[0]
def __setitem__(self, key, value):
try:
log.info("Setting {} to array with shape {}".format(key, value.shape))
except:
pass
self._matrix[key] = value + 1
def __getitem__(self, key):
return self._matrix[key] - 1
def __len__(self):
return self._len
class Ensemble(Mapping):
# INITIALIZATION
def __init__(self, cgs, reference_cg=None, sort_key=None):
"""
An Ensemble is a sequence of Coarse grained RNAs, all of which must correspond
to the same RNA 2D structure.
:param cgs: An iterable of coarse grain RNAs or a mapping key->cg,
from which the ensemble is constructed.
The ensemble may keep references to some of the cgs and
modify them by centering them on the centroid.
The ensemble is not guaranteed to keep/copy/reference any of the
cg's properties except the coords and twists.
:param sort_key: Optional. A function that takes a cg (if cgs is a sequence)
                         or a key (if cgs is a mapping) and returns a value to sort by.
"""
self._reference_cg = reference_cg
self._cgs = []
self._cg_lookup = {}
# The order of cgs, as specified by sort_key
self._cg_sequence = []
self._cg_rev_lookup = defaultdict(list)
if isinstance(cgs, Mapping):
for key in sorted(cgs.keys(), key=sort_key):
self._add_to_cg_list(cgs[key], key)
else:
if sort_key is not None:
ids = list(
sorted(range(len(cgs)), key=lambda x: sort_key(cgs[x])))
else:
ids = list(range(len(cgs)))
for key in ids:
self._add_to_cg_list(cgs[key], key)
# Center all cg coords on their centroid
for cg in self._cgs:
cg.coords.center()
if self._reference_cg is not None:
self._reference_cg.coords.center()
############## Caching of some descriptors ############################
        # The rmsd matrix. A negative entry means the value still needs to be calculated and will then be stored here.
self._rmsd = RMSDMatrix((len(self._cgs), len(self._cgs)))
for i in range(len(self._rmsd)):
self._rmsd[i, i] = 0.0
# 1D descriptors
self._descriptors = {}
def _get_descriptor(self, descr_name):
if descr_name not in self._descriptors:
self._descriptors[descr_name] = calculate_descriptor_for(descr_name,
self._cgs,
*self._get_args_for(descr_name))
# if descr_name == "rmsd_to_last":
# self._rmsd[-1,:]=self._descriptors[descr_name]
# self._rmsd[:, -1]=self._descriptors[descr_name]
return self._descriptors[descr_name]
def _add_to_cg_list(self, cg, key):
"""
During construction of the ensemble, this is used.
Save some bookkeeping variables and store the cg,
        if it is not identical to the previous one.
:param cg: The coarse grained RNA
:param key: The index or name under which this cg can be retrieved again.
"""
log.debug("Adding cg {} with key {}".format(cg.name, key))
# In the future, we might have to check the rmsd in addition,
# if the MCMC will allow rotation/translation
if not self._cgs or cg.coords != self._cgs[-1].coords or cg.twists != self._cgs[-1].twists:
if self._cgs:
log.debug("Coords or twists different!")
else:
log.debug("First cg added")
self._cgs.append(cg)
else:
log.debug("It is the same as the previous")
# Else: only store lookup entry pointing to previous cg
self._cg_lookup[key] = len(self._cgs) - 1
self._cg_rev_lookup[len(self._cgs) - 1].append(key)
self._cg_sequence.append(len(self._cgs) - 1)
# MEMBER LOOKUP
def __getitem__(self, key):
"""Using a key like used upon ensemble construction, retrieve the corresponding element"""
return self._cgs[self._cg_lookup[key]]
def __iter__(self):
return iter(self._cg_lookup.keys())
def __len__(self):
return len(self._cgs)
def at_timestep(self, timestep):
"""
Access an ensemble member by the timestep.
In contrast to __getitem__, which uses the index supplied upon ensemble creation,
this uses the sort order specified by sort_key in the __init__ function.
It is useful if the ensemble is seen as a trajectory.
This method gives the ith frame of this trajectory.
:param timestep: The number of the frame (cg) in the trajectory that should be retrieved.
:returns: A coarse-grained RNA.
"""
if hasattr(timestep, "__len__") and len(timestep) <= 3:
seq = []
for i in range(*timestep):
seq.append(self._cgs[self._cg_sequence[i]])
return seq
return self._cgs[self._cg_sequence[timestep]]
# RMSD and RMSD based calculations
def rmsd_between(self, key1, key2, mode="key"):
"""
Return (and cache) the rmsd between two structures.
:param key1, key2: Two keys to reference two cgs
:param mode: "key" or "timestep". Whether the keys are timesteps or the keys
used by __getitem__
:returns: the rmsd as float
"""
if mode == "key":
i = self._cg_lookup[key1]
j = self._cg_lookup[key2]
elif mode == "timestep":
i = self._cg_sequence[key1]
j = self._cg_sequence[key2]
else:
raise ValueError("Invalid mode {}".format(mode))
if self._rmsd[i, j] < 0:
self._rmsd[i, j] = self._rmsd[j, i] = self._cgs[i].coords.rmsd_to(
self._cgs[j].coords)
return self._rmsd[i, j]
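    # Hedged usage sketch (not part of the original module). Given a list `cgs` of
    # coarse-grained RNAs sharing the same 2D structure, keys default to list indices:
    #
    #     ens = Ensemble(cgs)
    #     first = ens[0]              # lookup by the key used at construction
    #     frame = ens.at_timestep(0)  # lookup by position in the sorted trajectory
    #     d = ens.rmsd_between(0, 1)  # pairwise RMSD, cached in the RMSD matrix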
def _calculate_complete_rmsd_matrix(self):
"""
Fill out all empty fields in the rmsd matrix.
"""
if np.any(self._rmsd < 0):
# print(np.where(np.isnan(self._rmsd)))
log.info("Starting complete rmsd calculation at {}".format(time.time()))
for i, j in it.combinations(range(len(self)), 2):
if self._rmsd[i, j] < 0:
self._rmsd[i, j] = self._rmsd[j, i] = self._cgs[i].coords.rmsd_to(
self._cgs[j].coords)
log.info("Finished complete rmsd calculation at {}".format(time.time()))
def _cluster_dbscan(self):
""""
Cluster all structures based on the DBSCAN algorithm
using the pairwise RMSD as distance.
"""
self._calculate_complete_rmsd_matrix()
db = DBSCAN(eps=np.mean(
self._rmsd[0]) / 3, min_samples=2, metric="precomputed").fit(self._rmsd)
return db
def _get_args_for(self, descriptor_name):
"""
Get the arguments that are required to calculate the given descriptor of the ensemble's cgs.
:param descriptor_name: The name of the descriptor to be calculated.
"""
if descriptor_name == "rmsd_to_reference":
if self._reference_cg:
return [self._reference_cg]
else:
return [self._cgs[0]]
elif descriptor_name == "rmsd_to_last":
return [self._cgs[-1]]
else:
return []
def view_delta_rmsd_vs_steps(self):
self._calculate_complete_rmsd_matrix()
fig, axes = plt.subplots(2)
a_rmsd = np_nans(len(self._cg_sequence) // 2)
min_rmsd = np_nans(len(self._cg_sequence) // 2)
max_rmsd = np_nans(len(self._cg_sequence) // 2)
for d in range(len(a_rmsd)):
l = [self._rmsd[self._cg_sequence[i], self._cg_sequence[i + d]]
for i in range(len(self._cg_sequence) - d)]
a_rmsd[d] = sum(l) / len(l)
min_rmsd[d] = min(l)
max_rmsd[d] = max(l)
for ax in axes:
ax.set_xlabel("Steps apart")
ax.set_ylabel("Average RMSD")
ax.plot(list(range(len(a_rmsd))), a_rmsd, label="Average RMSD")
ax.plot(list(range(len(min_rmsd))), min_rmsd, label="Minimal RMSD")
ax.plot(list(range(len(max_rmsd))), max_rmsd, label="Maximal RMSD")
ax.plot([0, len(max_rmsd)], [np.max(self._rmsd), np.max(
self._rmsd)], "-.", label="Maximal RMSD in whole simulation")
ax.plot([0, len(max_rmsd)], [np.mean(self._rmsd), np.mean(
self._rmsd)], "-.", label="Average RMSD in whole simulation")
ax.legend(prop={'size': 6})
axes[1].set_xlim([0, 50])
plt.savefig("rmsd_steps_apart_{}.svg".format(self._cgs[0].name))
plt.clf()
plt.close()
def view_2d_embedding(self, reference=None):
# http://baoilleach.blogspot.co.at/2014/01/convert-distance-matrix-to-2d.html
if reference is None:
# First cluster all structures based on pairwise RMSD
db = self._cluster_dbscan()
labels = db.labels_
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
unique_labels = set(labels)
# Then calculate the 2D coordinates for our embedding
mds = MDS(n_components=2,
dissimilarity="precomputed", random_state=6)
results = mds.fit(self._rmsd)
coords = results.embedding_
# Now plot
plt.plot(coords[:, 0], coords[:, 1], '-', color="blue")
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
plt.plot(coords[:, 0][class_member_mask & core_samples_mask],
coords[:, 1][class_member_mask & core_samples_mask],
'o', markerfacecolor=col, markeredgecolor='k', markersize=6
)
plt.plot(coords[:, 0][class_member_mask & ~core_samples_mask],
coords[:, 1][class_member_mask & ~core_samples_mask],
'o', markerfacecolor=col, markeredgecolor=col, markersize=1
)
plt.savefig("embedding_{}.svg".format(self._cgs[0].name))
plt.clf()
plt.close()
else:
# Create a huge distance matrix
alldists = np.zeros(
((len(self._cgs) + len(reference) + 1), (len(self._cgs) + len(reference) + 1)))
for i, j in it.combinations(range(len(alldists)), 2):
if i < len(self._cgs):
cg1 = self._cgs[i]
elif i < len(self._cgs) + len(reference):
cg1 = reference[i - len(self._cgs)]
else:
assert i == len(self._cgs) + len(reference)
cg1 = self._reference_cg
if j < len(self._cgs):
cg2 = self._cgs[j]
elif j < len(self._cgs) + len(reference):
cg2 = reference[j - len(self._cgs)]
else:
assert j == len(self._cgs) + len(reference)
cg2 = self._reference_cg
alldists[i, j] = alldists[j, i] = ftms.cg_rmsd(cg1, cg2)
# Then calculate the 2D coordinates for our embedding
mds = MDS(n_components=2,
dissimilarity="precomputed", random_state=6)
results = mds.fit(alldists)
coords = results.embedding_
# Now plot
plt.plot(coords[len(self._cgs):len(self._cgs) + len(reference), 0],
coords[len(self._cgs):len(self._cgs) + len(reference), 1], 's', color="green")
plt.plot(coords[:len(self._cgs), 0],
coords[:len(self._cgs), 1], '-o', color="blue")
plt.plot([coords[-1, 0]], [coords[-1, 1]], 's', color="red")
plt.savefig("embedding1_{}.svg".format(self._cgs[0].name))
plt.clf()
plt.close()
def color_by_energy(self, bins, ref_ensemble=None, ref_energies=None,
x="rmsd_to_reference", y="rmsd_to_last"):
""""""
plt.set_cmap('jet_r')
# Get the data for plotting of the ensemble
data_x = self._get_descriptor(x)
data_y = self._get_descriptor(y)
energies = self._get_descriptor("info_energy")
image = get_energy_image(data_x, data_y, energies, bins)
# Label the plot
plt.xlabel(x)
plt.ylabel(y)
plt.imshow(image.T, interpolation="nearest", origin='lower',
extent=[bins[0][0], bins[0][-1], bins[1][0], bins[1][-1]],
aspect='auto')
plt.xlim([bins[0][0], bins[0][-1]])
plt.ylim([bins[1][0], bins[1][-1]])
plt.colorbar()
figname = "minEnergy_{}_{}_{}.svg".format(self._cgs[0].name, x, y)
plt.savefig(figname)
log.info("Figure {} created".format(figname))
plt.clf()
plt.close()
if ref_ensemble and ref_energies:
# Get the data for plotting of the ensemble
ref_x = calculate_descriptor_for(
x, ref_ensemble, *self._get_args_for(x))
ref_y = calculate_descriptor_for(
y, ref_ensemble, *self._get_args_for(y))
image = get_energy_image(ref_x, ref_y, ref_energies, bins)
# Label the plot
plt.xlabel(x)
plt.ylabel(y)
plt.imshow(image.T, interpolation="nearest", origin='lower',
extent=[bins[0][0], bins[0][-1],
bins[1][0], bins[1][-1]],
aspect='auto')
plt.xlim([bins[0][0], bins[0][-1]])
plt.ylim([bins[1][0], bins[1][-1]])
plt.colorbar()
figname = "minEnergy_reference_{}_{}_{}.svg".format(
self._cgs[0].name, x, y)
plt.savefig(figname)
log.info("Figure {} created".format(figname))
plt.clf()
plt.close()
plt.set_cmap('jet')
def view_2d_hist(self, ref_ensemble=None, x="rmsd_to_reference", y="rmsd_to_last", bins=None):
"""
Plot a 2D histogram of the ensemble with respect to the given x and y axis,
and visualize the results of clustering with DBSCAN.
:param ref_ensemble: An ensemble or a list of cgs. Plotted as additional image.
:param x: A STRING. The descriptor name used as x axis.
:param y: A STRING. The descriptor name used as y axis.
:param bins: Passed to matplotlib.pyplot.hist2d
:returns: The bins used
Saves the resulting plot as a svg in the current directory.
"""
# Get the data for plotting of the ensemble
data_x = self._get_descriptor(x)
data_y = self._get_descriptor(y)
if ref_ensemble:
# Label the plot
plt.xlabel(x)
plt.ylabel(y)
# Get the data for plotting of the ensemble
ref_x = calculate_descriptor_for(
x, ref_ensemble, *self._get_args_for(x))
ref_y = calculate_descriptor_for(
y, ref_ensemble, *self._get_args_for(y))
if bins is None:
# Calculate the bins, using reference and ensemble
_, xedges, yedges = np.histogram2d(
list(ref_x) + list(data_x), list(ref_y) + list(data_y), bins=40, normed=True)
bins = [xedges, yedges]
# Plot the 2D histogram
plt.hist2d(ref_x, ref_y, bins=bins, normed=True)
plt.colorbar()
figname = "hist2d_reference_{}_{}_{}.svg".format(
ref_ensemble[0].name, x, y)
plt.savefig(figname)
log.info("Figure {} created".format(figname))
plt.clf()
plt.close()
if bins is None:
bins = 40
# Label the plot
plt.xlabel(x)
plt.ylabel(y)
# Plot the 2D histogram
_, xedges, yedges, _ = plt.hist2d(
data_x, data_y, bins=bins, normed=True)
bins = [xedges, yedges]
plt.colorbar()
figname = "hist2d_{}_{}_{}.svg".format(self._cgs[0].name, x, y)
plt.savefig(figname)
log.info("Figure {} created".format(figname))
plt.clf()
plt.close()
print("BINS", bins)
return bins
def view_2d_projection(self, ref_ensemble=None, x="rmsd_to_reference", y="rmsd_to_last", cluster=False, circular=False):
"""
Plot a 2D projection of the ensemble to the given x and y axis,
and visualize the results of clustering with DBSCAN.
:param ref_ensemble: An ensemble or a list of cgs. Plotted as a background in the images.
:param x: A STRING. The descriptor name used as x axis.
:param y: A STRING. The descriptor name used as y axis.
Saves the resulting plot as a svg in the current directory.
"""
if circular:
fig, ax = plt.subplots(1, subplot_kw=dict(projection='polar'))
else:
fig, ax = plt.subplots(1)
# Label the plot
ax.set_xlabel(x)
ax.set_ylabel(y)
# First, plot the background (reference ensemble)
if ref_ensemble is not None:
log.info("Reference ensemble given")
ref_x = calculate_descriptor_for(
x, ref_ensemble, *self._get_args_for(x))
ref_y = calculate_descriptor_for(
y, ref_ensemble, *self._get_args_for(y))
log.info("Plotting reference")
ax.plot(ref_x, ref_y, 's', markerfacecolor="green",
markeredgecolor='green', markersize=8)
else:
log.info("Reference ensemble missing")
# Get the data for plotting of the ensemble
data_x = self._get_descriptor(x)
data_y = self._get_descriptor(y)
if ref_ensemble is not None:
# Without duplicates
print("KS-Test without duplicates for {} : {}".format(x,
scipy.stats.ks_2samp(data_x, ref_x)))
print("KS-Test without duplicates for {} : {}".format(y,
scipy.stats.ks_2samp(data_y, ref_y)))
# With duplicates
# Correctly account for duplicates
full_x = [data_x[i] for i in self._cg_sequence]
full_y = [data_y[i] for i in self._cg_sequence]
print("KS-Test for {} : {}".format(x,
scipy.stats.ks_2samp(full_x, ref_x)))
print("KS-Test for {} : {}".format(y,
scipy.stats.ks_2samp(full_y, ref_y)))
if cluster:
# In the background, plot lines to show the sampling trajectory
ax.plot(data_x, data_y, '-', color="blue")
# Then cluster all structures based on pairwise RMSD
db = self._cluster_dbscan()
labels = db.labels_
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
ax.plot(data_x[class_member_mask & core_samples_mask],
data_y[class_member_mask & core_samples_mask],
'o', markerfacecolor=col, markeredgecolor='k', markersize=6
)
ax.plot(data_x[class_member_mask & ~core_samples_mask],
data_y[class_member_mask & ~core_samples_mask],
'o', markerfacecolor=col, markeredgecolor=col, markersize=1
)
else:
# In the background, plot lines to show the sampling trajectory
ax.plot(data_x, data_y, '-o', color="blue")
if self._reference_cg:
ax.plot(calculate_descriptor_for(x, [self._reference_cg], *self._get_args_for(x)),
calculate_descriptor_for(
y, [self._reference_cg], *self._get_args_for(y)),
"x", color="red", markersize=12, label="reference")
figname = "cluster_{}_{}_{}{}.svg".format(
self._cgs[0].name, x, y, "circ" * circular)
plt.savefig(figname)
log.info("Figure {} created".format(figname))
plt.clf()
plt.close()
def ensemble_pca(self, ref_ensemble=None, ref_first=True):
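        """
        Project all structures onto the first two principal components of their
        centered stem coordinates and save the resulting scatter plot as an SVG.
        :param ref_ensemble: Optional background ensemble, plotted in green.
        :param ref_first: If True and a reference ensemble is given, fit the PCA on
                          the reference ensemble instead of on the sampled structures.
        """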
data = prepare_pca_input(self._cgs)
pca = PCA(n_components=2)
if ref_ensemble:
ref_data = prepare_pca_input(ref_ensemble)
if ref_first:
pca.fit(ref_data)
if not ref_ensemble or not ref_first:
pca.fit(data)
reduced_data = pca.transform(data)
if ref_ensemble:
reduced_ref = pca.transform(ref_data)
plt.scatter(reduced_ref[:, 0], reduced_ref[:, 1],
color="green", label="background")
        plt.scatter(reduced_data[:, 0], reduced_data[:, 1],
                    color="blue", label="sampling")
if self._reference_cg:
data_true = prepare_pca_input([self._reference_cg])
reduced_true = pca.transform(data_true)
            plt.scatter(reduced_true[:, 0], reduced_true[:, 1],
                        color="red", label="reference")
plt.xlabel("First principal component")
plt.ylabel("Second principal component")
figname = "pca_{}_rf{}.svg".format(self._cgs[0].name, ref_first)
plt.savefig(figname)
log.info("Figure {} created".format(figname))
plt.clf()
plt.close()
def create_element_csv(self, outname):
"""
        Write per-element statistics of the sampled structures to a CSV file.
        :param outname: The name of the CSV file to be written.
"""
data = defaultdict(list)
for i, cg in enumerate(self._cgs):
build_order = cg.traverse_graph()
for elem in cg.mst:
if elem[0] not in "mi":
continue
line = cg.sampled[elem]
# load angle_stats in direction of build order!
for bo in build_order:
if bo[1] == elem:
stat = cg.get_bulge_angle_stats_core(
elem, (bo[0], bo[2]))
stat.pdb_name = line[0]
                        # Use correct multiplicity (self._cgs has subsequent duplicates removed)
for j in range(self._cg_sequence.count(i)):
data["cg_name"].append(cg.name)
data["key"].append(self._cg_rev_lookup[i][j])
data["elem_name"].append(elem)
data["stat_name"].append(stat.pdb_name)
data["u"].append(stat.u)
data["v"].append(stat.v)
data["angle"].append(stat.get_angle())
df = pd.DataFrame(data)
df.to_csv(outname)
def prepare_pca_input(cgs):
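    # Build the PCA input matrix: one row per structure, containing the structure's
    # ordered stem coordinates centered on their centroid and flattened into a
    # single feature vector (this is the input expected by ensemble_pca).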
data = []
for cg in cgs:
# reshape(-1) returns a flattened view
data.append(ftuv.center_on_centroid(
cg.get_ordered_stem_poss()).reshape(-1))
return np.array(data)
def get_energy_image(data_x, data_y, energies, bins):
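    # For every 2D bin keep the minimum energy of all structures falling into it;
    # bins without any structure remain NaN. Points outside the bin range are
    # clamped to the outermost bins.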
image = np_nans([len(bins[0]) - 1, len(bins[1]) - 1])
bins_x = np.digitize(data_x, bins[0])
bins_y = np.digitize(data_y, bins[1])
for i in range(len(data_x)):
bx = bins_x[i] - 1
by = bins_y[i] - 1
if bx < 0:
bx = 0
if by < 0:
by = 0
if bx >= len(image):
bx = len(image) - 1
if by >= len(image[0]):
by = len(image[0]) - 1
if np.isnan(image[bx, by]):
image[bx, by] = energies[i]
else:
image[bx, by] = min(energies[i], image[bx, by])
return image
class DescriptorCalc(object):
"""
    Helper class to calculate descriptors of coarse-grained RNAs for an ensemble (or a list) of cgs.
"""
@staticmethod
def rmsd_to_stru(cgs, reference_cg):
rmsd = np_nans(len(cgs))
for i, cg in enumerate(cgs):
rmsd[i] = ftms.cg_rmsd(cg, reference_cg)
return rmsd
@staticmethod
def rog(cgs):
rogs = np_nans(len(cgs))
for i, cg in enumerate(cgs):
rogs[i] = cg.radius_of_gyration()
return rogs
@staticmethod
def anisotropy(cgs):
ai = np_nans(len(cgs))
for i, cg in enumerate(cgs):
ai[i] = ftmd.anisotropy(cg.get_ordered_stem_poss())
return ai
@staticmethod
def info_energy(cgs):
e = np_nans(len(cgs))
for i, cg in enumerate(cgs):
try:
e[i] = float(cg.infos["totalEnergy"][0].split()[0])
            except Exception:  # missing or malformed totalEnergy info
e[i] = float("nan")
return e
@staticmethod
def cg_distance(cgs, elem):
d = []
for cg in cgs:
start, end = cg.coords[elem]
d.append(ftuv.vec_distance(start, end))
return d
@staticmethod
def stat_angle(cgs, elem):
d = []
for cg in cgs:
angle = cg.get_stats(elem)[0].get_angle()
d.append(angle)
return d
@staticmethod
def cg_dist_difference(cgs, elem1, elem2):
d = []
for cg in cgs:
start, end = cg.coords[elem1]
d1 = ftuv.vec_distance(start, end)
start, end = cg.coords[elem2]
d2 = ftuv.vec_distance(start, end)
d.append(d1 - d2)
return d
@staticmethod
def cg_dist_sum(cgs, elem1, elem2):
d = []
for cg in cgs:
start, end = cg.coords[elem1]
d1 = ftuv.vec_distance(start, end)
start, end = cg.coords[elem2]
d2 = ftuv.vec_distance(start, end)
d.append(d1 + d2)
return d
valid_descriptors = {
"rmsd_to_reference": DescriptorCalc.rmsd_to_stru,
"rmsd_to_last": DescriptorCalc.rmsd_to_stru,
"rog": DescriptorCalc.rog,
"ROG": DescriptorCalc.rog,
"anisotropy": DescriptorCalc.anisotropy,
"info_energy": DescriptorCalc.info_energy
}
def calculate_descriptor_for(descriptor_name, cgs, *args):
"""Calculate a descriptor."""
if descriptor_name.startswith("stat_angle"):
elem = descriptor_name.split("_")[-1]
return DescriptorCalc.stat_angle(cgs, elem)
elif descriptor_name.startswith("cg_distance"):
elem = descriptor_name.split("_")[-1]
return DescriptorCalc.cg_distance(cgs, elem)
elif descriptor_name.startswith("cg_dist_sum"):
elem1, elem2 = descriptor_name.split("_")[-2:]
return DescriptorCalc.cg_dist_sum(cgs, elem1, elem2)
elif descriptor_name.startswith("cg_dist_difference"):
elem1, elem2 = descriptor_name.split("_")[-2:]
return DescriptorCalc.cg_dist_difference(cgs, elem1, elem2)
elif descriptor_name not in valid_descriptors:
raise ValueError("Unknown descriptor {}".format(descriptor_name))
else:
return valid_descriptors[descriptor_name](cgs, *args)
|
gpl-3.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/indexes/period/test_period.py
|
1
|
21238
|
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, NaT, Period, PeriodIndex, Series,
date_range, offsets, period_range)
from pandas.util import testing as tm
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
def setup_method(self, method):
self.indices = dict(index=tm.makePeriodIndex(10),
index_dec=period_range('20130101', periods=10,
freq='D')[::-1])
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_pickle_compat_construction(self):
pass
@pytest.mark.parametrize('freq', ['D', 'M', 'A'])
def test_pickle_round_trip(self, freq):
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
def test_where(self):
# This is handled in test_indexing
pass
@pytest.mark.parametrize('use_numpy', [True, False])
@pytest.mark.parametrize('index', [
pd.period_range('2000-01-01', periods=3, freq='D'),
pd.period_range('2001-01-01', periods=3, freq='2D'),
pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')])
def test_repeat_freqstr(self, index, use_numpy):
# GH10183
expected = PeriodIndex([p for p in index for _ in range(3)])
result = np.repeat(index, 3) if use_numpy else index.repeat(3)
tm.assert_index_equal(result, expected)
assert result.freqstr == index.freqstr
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
tm.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna(
pd.Period('2011-01-01', freq='D')), exp)
def test_no_millisecond_field(self):
with pytest.raises(AttributeError):
DatetimeIndex.millisecond
with pytest.raises(AttributeError):
DatetimeIndex([]).millisecond
@pytest.mark.parametrize("sort", [None, False])
def test_difference_freq(self, sort):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other, sort)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other, sort)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_hash_error(self):
index = period_range('20010101', periods=10)
with pytest.raises(TypeError, match=("unhashable type: %r" %
type(index).__name__)):
hash(index)
def test_make_time_series(self):
index = period_range(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_shallow_copy_i8(self):
# GH-24391
pi = period_range("2018-01-01", periods=3, freq="2D")
result = pi._shallow_copy(pi.asi8, freq=pi.freq)
tm.assert_index_equal(result, pi)
def test_shallow_copy_changing_freq_raises(self):
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(IncompatibleFrequency, match="are different"):
pi._shallow_copy(pi, freq="H")
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
assert pi.dtype_str == 'period[M]'
assert pi.dtype_str == str(pi.dtype)
pi = pd.PeriodIndex([], freq='3M')
assert pi.dtype_str == 'period[3M]'
assert pi.dtype_str == str(pi.dtype)
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
def test_period_index_length(self):
pi = period_range(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = period_range(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = period_range(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
start = Period('02-Apr-2005', 'B')
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
try:
period_range(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = period_range(start=start, end=end_intv)
try:
period_range(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = period_range(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = period_range(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = period_range(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = period_range(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = period_range(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = period_range(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = period_range(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = period_range(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = period_range(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'dayofyear',
'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_is_(self):
create_index = lambda: period_range(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq('M'))
assert not index.is_(index.asfreq('A'))
assert not index.is_(index - 2)
assert not index.is_(index - 0)
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
assert Period('2007-01', freq='M') in rng
assert not Period('2007-01', freq='D') in rng
assert not Period('2007-01', freq='2M') in rng
def test_contains_nat(self):
# see gh-13582
idx = period_range('2007-01', freq='M', periods=10)
assert pd.NaT not in idx
assert None not in idx
assert float('nan') not in idx
assert np.nan not in idx
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert pd.NaT in idx
assert None in idx
assert float('nan') in idx
assert np.nan in idx
def test_periods_number_check(self):
with pytest.raises(ValueError):
period_range('2011-1-1', '2012-1-1', 'B')
def test_start_time(self):
# GH 17157
index = period_range(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
# GH 17157
index = period_range(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='M')
expected_index = expected_index.shift(1, freq='D').shift(-1, freq='ns')
tm.assert_index_equal(index.end_time, expected_index)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
def test_shift(self):
# This is tested in test_arithmetic
pass
@td.skip_if_32bit
def test_ndarray_compat_properties(self):
super(TestPeriodIndex, self).test_ndarray_compat_properties()
def test_negative_ordinals(self):
Period(ordinal=-1000, freq='A')
Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_index_equal(idx1, idx2)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D', name='name')
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
tm.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s['05Q4'] == s[2]
def test_pindex_multiples(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
def test_iteration(self):
index = period_range(start='1/1/10', periods=4, freq='B')
result = list(index)
assert isinstance(result[0], Period)
assert result[0].freq == index.freq
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
assert index[:0].is_full
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
assert isinstance(s.index.levels[0], PeriodIndex)
assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == 'M'
def test_map(self):
# test_map_dictlike generally tests
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x.ordinal)
exp = Index([x.ordinal for x in index])
tm.assert_index_equal(result, exp)
def test_join_self(self, join_type):
index = period_range('1/1/2000', periods=10)
joined = index.join(index, how=join_type)
assert index is joined
def test_insert(self):
# GH 18295 (test missing)
expected = PeriodIndex(
['2017Q1', pd.NaT, '2017Q2', '2017Q3', '2017Q4'], freq='Q')
for na in (np.nan, pd.NaT, None):
result = period_range('2017Q1', periods=4, freq='Q').insert(1, na)
tm.assert_index_equal(result, expected)
def test_maybe_convert_timedelta():
pi = PeriodIndex(['2000', '2001'], freq='D')
offset = offsets.Day(2)
assert pi._maybe_convert_timedelta(offset) == 2
assert pi._maybe_convert_timedelta(2) == 2
offset = offsets.BusinessDay()
with pytest.raises(ValueError, match='freq'):
pi._maybe_convert_timedelta(offset)
|
bsd-3-clause
|
kdebrab/pandas
|
pandas/tests/groupby/test_timegrouper.py
|
5
|
26980
|
""" test with the TimeGrouper / grouping with datetimes """
import pytest
import pytz
from datetime import datetime
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (DataFrame, date_range, Index,
Series, MultiIndex, Timestamp, DatetimeIndex)
from pandas.core.groupby.ops import BinGrouper
from pandas.compat import StringIO
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestGroupBy(object):
def test_groupby_with_timegrouper(self):
# GH 4161
# TimeGrouper requires a sorted index
# also verifies that the resultant index has the correct name
df_original = DataFrame({
'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [
datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0),
]
})
# GH 6908 change target column's order
df_reordered = df_original.sort_values(by='Quantity')
for df in [df_original, df_reordered]:
df = df.set_index(['Date'])
expected = DataFrame(
{'Quantity': 0},
index=date_range('20130901 13:00:00',
'20131205 13:00:00', freq='5D',
name='Date', closed='left'))
expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype='int64')
            result1 = df.resample('5D').sum()
assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
result2 = df_sorted.groupby(pd.Grouper(freq='5D')).sum()
assert_frame_equal(result2, expected)
result3 = df.groupby(pd.Grouper(freq='5D')).sum()
assert_frame_equal(result3, expected)
@pytest.mark.parametrize("should_sort", [True, False])
def test_groupby_with_timegrouper_methods(self, should_sort):
# GH 3881
# make sure API of timegrouper conforms
df = pd.DataFrame({
'Branch': 'A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 8, 9, 3],
'Date': [
datetime(2013, 1, 1, 13, 0),
datetime(2013, 1, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 12, 2, 14, 0),
]
})
if should_sort:
df = df.sort_values(by='Quantity', ascending=False)
df = df.set_index('Date', drop=False)
g = df.groupby(pd.Grouper(freq='6M'))
assert g.group_keys
assert isinstance(g.grouper, BinGrouper)
groups = g.groups
assert isinstance(groups, dict)
assert len(groups) == 3
def test_timegrouper_with_reg_groups(self):
# GH 3794
        # allow combination of timegrouper/reg groups
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
datetime(2013, 1, 1, 13, 0),
datetime(2013, 1, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 12, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
datetime(2013, 12, 31, 0, 0),
datetime(2013, 12, 31, 0, 0),
datetime(2013, 12, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='A'), 'Buyer']).sum()
assert_frame_equal(result, expected)
expected = DataFrame({
'Buyer': 'Carl Mark Carl Joe'.split(),
'Quantity': [1, 3, 9, 18],
'Date': [
datetime(2013, 1, 1, 0, 0),
datetime(2013, 1, 1, 0, 0),
datetime(2013, 7, 1, 0, 0),
datetime(2013, 7, 1, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='6MS'), 'Buyer']).sum()
assert_frame_equal(result, expected)
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
datetime(2013, 10, 1, 13, 0),
datetime(2013, 10, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 10, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark Carl Joe'.split(),
'Quantity': [6, 8, 3, 4, 10],
'Date': [
datetime(2013, 10, 1, 0, 0),
datetime(2013, 10, 1, 0, 0),
datetime(2013, 10, 1, 0, 0),
datetime(2013, 10, 2, 0, 0),
datetime(2013, 10, 2, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='1D'), 'Buyer']).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M'), 'Buyer']).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
datetime(2013, 10, 31, 0, 0),
datetime(2013, 10, 31, 0, 0),
datetime(2013, 10, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# passing the name
df = df.reset_index()
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
with pytest.raises(KeyError):
df.groupby([pd.Grouper(freq='1M', key='foo'), 'Buyer']).sum()
# passing the level
df = df.set_index('Date')
result = df.groupby([pd.Grouper(freq='1M', level='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', level=0), 'Buyer']).sum(
)
assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.groupby([pd.Grouper(freq='1M', level='foo'),
'Buyer']).sum()
# multi names
df = df.copy()
df['Date'] = df.index + pd.offsets.MonthEnd(2)
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
datetime(2013, 11, 30, 0, 0),
datetime(2013, 11, 30, 0, 0),
datetime(2013, 11, 30, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# error as we have both a level and a name!
with pytest.raises(ValueError):
df.groupby([pd.Grouper(freq='1M', key='Date',
level='Date'), 'Buyer']).sum()
# single groupers
expected = DataFrame({'Quantity': [31],
'Date': [datetime(2013, 10, 31, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M')]).sum()
assert_frame_equal(result, expected)
expected = DataFrame({'Quantity': [31],
'Date': [datetime(2013, 11, 30, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M', key='Date')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['D', 'M', 'A', 'Q-APR'])
def test_timegrouper_with_reg_groups_freq(self, freq):
# GH 6764 multiple grouping with/without sort
df = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, 364, 280, 259, 201, 623, 90, 312, 359, 301,
359, 801],
'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12]
}).set_index('date')
expected = (
df.groupby('user_id')['whole_cost']
.resample(freq)
.sum(min_count=1) # XXX
.dropna()
.reorder_levels(['date', 'user_id'])
.sort_index()
.astype('int64')
)
expected.name = 'whole_cost'
result1 = df.sort_index().groupby([pd.Grouper(freq=freq),
'user_id'])['whole_cost'].sum()
assert_series_equal(result1, expected)
result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[
'whole_cost'].sum()
assert_series_equal(result2, expected)
def test_timegrouper_get_group(self):
# GH 6914
df_original = DataFrame({
'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0), ]
})
df_reordered = df_original.sort_values(by='Quantity')
# single grouping
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
dt_list = ['2013-09-30', '2013-10-31', '2013-12-31']
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M', key='Date'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
# multiple grouping
expected_list = [df_original.iloc[[1]], df_original.iloc[[3]],
df_original.iloc[[4]]]
g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'),
('Joe', '2013-12-31')]
for df in [df_original, df_reordered]:
grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')])
for (b, t), expected in zip(g_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group((b, dt))
assert_frame_equal(result, expected)
# with index
df_original = df_original.set_index('Date')
df_reordered = df_original.sort_values(by='Quantity')
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
def test_timegrouper_apply_return_type_series(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_series(x):
return pd.Series([x['value'].sum()], ('sum',))
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_series)
result = (df_dt.groupby(pd.Grouper(freq='M', key='date'))
.apply(sumfunc_series))
assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_timegrouper_apply_return_type_value(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_value(x):
return x.value.sum()
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_value)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_value))
assert_series_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_groupby_groups_datetimeindex(self):
# #1430
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(periods),
'low': np.arange(periods)}, index=ind)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
# it works!
groups = grouped.groups
assert isinstance(list(groups.keys())[0], datetime)
# GH 11442
index = pd.date_range('2015/01/01', periods=5, name='date')
df = pd.DataFrame({'A': [5, 6, 7, 8, 9],
'B': [1, 2, 3, 4, 5]}, index=index)
result = df.groupby(level='date').groups
dates = ['2015-01-05', '2015-01-04', '2015-01-03',
'2015-01-02', '2015-01-01']
expected = {pd.Timestamp(date): pd.DatetimeIndex([date], name='date')
for date in dates}
tm.assert_dict_equal(result, expected)
grouped = df.groupby(level='date')
for date in dates:
result = grouped.get_group(date)
data = [[df.loc[date, 'A'], df.loc[date, 'B']]]
expected_index = pd.DatetimeIndex([date], name='date')
expected = pd.DataFrame(data,
columns=list('AB'),
index=expected_index)
tm.assert_frame_equal(result, expected)
def test_groupby_groups_datetimeindex_tz(self):
# GH 3950
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': dates,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['datetime'] = df['datetime'].apply(
lambda d: Timestamp(d, tz='US/Pacific'))
exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='datetime')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['datetime', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='Asia/Tokyo')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
assert result['date'][3] == Timestamp('2012-07-03')
def test_groupby_multi_timezone(self):
# combining multiple / different timezones yields UTC
data = """0,2000-01-28 16:47:00,America/Chicago
1,2000-01-29 16:48:00,America/Chicago
2,2000-01-30 16:49:00,America/Los_Angeles
3,2000-01-31 16:50:00,America/Chicago
4,2000-01-01 16:50:00,America/New_York"""
df = pd.read_csv(StringIO(data), header=None,
names=['value', 'date', 'tz'])
result = df.groupby('tz').date.apply(
lambda x: pd.to_datetime(x).dt.tz_localize(x.name))
expected = Series([Timestamp('2000-01-28 16:47:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-29 16:48:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-30 16:49:00-0800',
tz='America/Los_Angeles'),
Timestamp('2000-01-31 16:50:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-01 16:50:00-0500',
tz='America/New_York')],
name='date',
dtype=object)
assert_series_equal(result, expected)
tz = 'America/Chicago'
res_values = df.groupby('tz').date.get_group(tz)
result = pd.to_datetime(res_values).dt.tz_localize(tz)
exp_values = Series(['2000-01-28 16:47:00', '2000-01-29 16:48:00',
'2000-01-31 16:50:00'],
index=[0, 1, 3], name='date')
expected = pd.to_datetime(exp_values).dt.tz_localize(tz)
assert_series_equal(result, expected)
def test_groupby_groups_periods(self):
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'period': [pd.Period(d, freq='H') for d in dates],
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
exp_idx1 = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
freq='H', name='period')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['period', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.PeriodIndex(dates, freq='H')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], freq='H')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_first_datetime64(self):
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
assert issubclass(df[1].dtype.type, np.datetime64)
result = df.groupby(level=0).first()
got_dt = result[1].dtype
assert issubclass(got_dt.type, np.datetime64)
result = df[1].groupby(level=0).first()
got_dt = result.dtype
assert issubclass(got_dt.type, np.datetime64)
def test_groupby_max_datetime64(self):
# GH 5869
# datetimelike dtype conversion from int
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = df.groupby('A')['A'].apply(lambda x: x.max())
result = df.groupby('A')['A'].max()
assert_series_equal(result, expected)
def test_groupby_datetime64_32_bit(self):
# GH 6410 / numpy 4328
# 32-bit under 1.9-dev indexing issue
df = DataFrame({"A": range(2), "B": [pd.Timestamp('2000-01-1')] * 2})
result = df.groupby("A")["B"].transform(min)
expected = Series([pd.Timestamp('2000-01-1')] * 2, name='B')
assert_series_equal(result, expected)
def test_groupby_with_timezone_selection(self):
# GH 11616
# Test that column selection returns output in correct timezone.
np.random.seed(42)
df = pd.DataFrame({
'factor': np.random.randint(0, 3, size=60),
'time': pd.date_range('01/01/2000 00:00', periods=60,
freq='s', tz='UTC')
})
df1 = df.groupby('factor').max()['time']
df2 = df.groupby('factor')['time'].max()
tm.assert_series_equal(df1, df2)
def test_timezone_info(self):
# see gh-11682: Timezone info lost when broadcasting
# scalar datetime to DataFrame
df = pd.DataFrame({'a': [1], 'b': [datetime.now(pytz.utc)]})
assert df['b'][0].tzinfo == pytz.utc
df = pd.DataFrame({'a': [1, 2, 3]})
df['b'] = datetime.now(pytz.utc)
assert df['b'][0].tzinfo == pytz.utc
def test_datetime_count(self):
df = DataFrame({'a': [1, 2, 3] * 2,
'dates': pd.date_range('now', periods=6, freq='T')})
result = df.groupby('a').dates.count()
expected = Series([
2, 2, 2
], index=Index([1, 2, 3], name='a'), name='dates')
tm.assert_series_equal(result, expected)
def test_first_last_max_min_on_time_data(self):
# GH 10295
# Verify that NaT is not in the result of max, min, first and last on
# Dataframe with datetime or timedelta values.
from datetime import timedelta as td
df_test = DataFrame(
{'dt': [nan, '2015-07-24 10:10', '2015-07-25 11:11',
'2015-07-23 12:12', nan],
'td': [nan, td(days=1), td(days=2), td(days=3), nan]})
df_test.dt = pd.to_datetime(df_test.dt)
df_test['group'] = 'A'
df_ref = df_test[df_test.dt.notna()]
grouped_test = df_test.groupby('group')
grouped_ref = df_ref.groupby('group')
assert_frame_equal(grouped_ref.max(), grouped_test.max())
assert_frame_equal(grouped_ref.min(), grouped_test.min())
assert_frame_equal(grouped_ref.first(), grouped_test.first())
assert_frame_equal(grouped_ref.last(), grouped_test.last())
def test_nunique_with_timegrouper_and_nat(self):
# GH 17575
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
pd.NaT,
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']})
grouper = pd.Grouper(key='time', freq='h')
result = test.groupby(grouper)['data'].nunique()
expected = test[test.time.notnull()].groupby(grouper)['data'].nunique()
tm.assert_series_equal(result, expected)
def test_scalar_call_versus_list_call(self):
# Issue: 17530
data_frame = {
'location': ['shanghai', 'beijing', 'shanghai'],
'time': pd.Series(['2017-08-09 13:32:23', '2017-08-11 23:23:15',
'2017-08-11 22:23:15'],
dtype='datetime64[ns]'),
'value': [1, 2, 3]
}
data_frame = pd.DataFrame(data_frame).set_index('time')
grouper = pd.Grouper(freq='D')
grouped = data_frame.groupby(grouper)
result = grouped.count()
grouped = data_frame.groupby([grouper])
expected = grouped.count()
assert_frame_equal(result, expected)
|
bsd-3-clause
|
victor-prado/broker-manager
|
environment/lib/python3.5/site-packages/pandas/tools/tests/test_tile.py
|
7
|
10026
|
import os
import nose
import numpy as np
from pandas.compat import zip
from pandas import Series, Index
import pandas.util.testing as tm
from pandas.util.testing import assertRaisesRegexp
import pandas.core.common as com
from pandas.core.algorithms import quantile
from pandas.tools.tile import cut, qcut
import pandas.tools.tile as tmod
class TestCut(tm.TestCase):
def test_simple(self):
data = np.ones(5)
result = cut(data, 4, labels=False)
desired = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, desired,
check_dtype=False)
def test_bins(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
exp_codes = np.array([0, 0, 0, 1, 2, 0], dtype=np.int8)
tm.assert_numpy_array_equal(result.codes, exp_codes)
exp = np.array([0.1905, 3.36666667, 6.53333333, 9.7])
tm.assert_almost_equal(bins, exp)
def test_right(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
exp_codes = np.array([0, 0, 0, 2, 3, 0, 0], dtype=np.int8)
tm.assert_numpy_array_equal(result.codes, exp_codes)
exp = np.array([0.1905, 2.575, 4.95, 7.325, 9.7])
tm.assert_numpy_array_equal(bins, exp)
def test_noright(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
exp_codes = np.array([0, 0, 0, 2, 3, 0, 1], dtype=np.int8)
tm.assert_numpy_array_equal(result.codes, exp_codes)
exp = np.array([0.2, 2.575, 4.95, 7.325, 9.7095])
tm.assert_almost_equal(bins, exp)
def test_arraylike(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
exp_codes = np.array([0, 0, 0, 1, 2, 0], dtype=np.int8)
tm.assert_numpy_array_equal(result.codes, exp_codes)
exp = np.array([0.1905, 3.36666667, 6.53333333, 9.7])
tm.assert_almost_equal(bins, exp)
def test_bins_not_monotonic(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
self.assertRaises(ValueError, cut, data, [0.1, 1.5, 1, 10])
def test_wrong_num_labels(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
self.assertRaises(ValueError, cut, data, [0, 1, 10],
labels=['foo', 'bar', 'baz'])
def test_cut_corner(self):
# h3h
self.assertRaises(ValueError, cut, [], 2)
self.assertRaises(ValueError, cut, [1, 2, 3], 0.5)
def test_cut_out_of_range_more(self):
# #1511
s = Series([0, -1, 0, 1, -3], name='x')
ind = cut(s, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name='x')
tm.assert_series_equal(ind, exp)
def test_labels(self):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True)
ex_levels = Index(['(-0.001, 0.25]', '(0.25, 0.5]', '(0.5, 0.75]',
'(0.75, 1]'])
self.assert_index_equal(result.categories, ex_levels)
result, bins = cut(arr, 4, retbins=True, right=False)
ex_levels = Index(['[0, 0.25)', '[0.25, 0.5)', '[0.5, 0.75)',
'[0.75, 1.001)'])
self.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor(self):
s = Series(np.random.randn(100), name='foo')
factor = cut(s, 4)
self.assertEqual(factor.name, 'foo')
def test_label_precision(self):
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = Index(['(-0.00072, 0.18]', '(0.18, 0.36]',
'(0.36, 0.54]', '(0.54, 0.72]'])
self.assert_index_equal(result.categories, ex_levels)
def test_na_handling(self):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4)
result_arr = np.asarray(result)
ex_arr = np.where(com.isnull(arr), np.nan, result_arr)
tm.assert_almost_equal(result_arr, ex_arr)
result = cut(arr, 4, labels=False)
ex_result = np.where(com.isnull(arr), np.nan, result)
tm.assert_almost_equal(result, ex_result)
def test_inf_handling(self):
data = np.arange(6)
data_ser = Series(data, dtype='int64')
result = cut(data, [-np.inf, 2, 4, np.inf])
result_ser = cut(data_ser, [-np.inf, 2, 4, np.inf])
ex_categories = Index(['(-inf, 2]', '(2, 4]', '(4, inf]'])
tm.assert_index_equal(result.categories, ex_categories)
tm.assert_index_equal(result_ser.cat.categories, ex_categories)
self.assertEqual(result[5], '(4, inf]')
self.assertEqual(result[0], '(-inf, 2]')
self.assertEqual(result_ser[5], '(4, inf]')
self.assertEqual(result_ser[0], '(-inf, 2]')
def test_qcut(self):
arr = np.random.randn(1000)
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(bins, ex_bins)
ex_levels = cut(arr, ex_bins, include_lowest=True)
self.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds(self):
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
self.assertEqual(len(np.unique(factor)), 10)
def test_qcut_specify_quantiles(self):
arr = np.random.randn(100)
factor = qcut(arr, [0, .25, .5, .75, 1.])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same(self):
assertRaisesRegexp(ValueError, "edges.*unique", qcut,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_cut_out_of_bounds(self):
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = result.codes == -1
ex_mask = (arr < -1) | (arr > 1)
self.assert_numpy_array_equal(mask, ex_mask)
def test_cut_pass_labels(self):
arr = [50, 5, 10, 15, 20, 30, 70]
bins = [0, 25, 50, 100]
labels = ['Small', 'Medium', 'Large']
result = cut(arr, bins, labels=labels)
exp = cut(arr, bins)
exp.categories = labels
tm.assert_categorical_equal(result, exp)
def test_qcut_include_lowest(self):
values = np.arange(10)
cats = qcut(values, 4)
ex_levels = ['[0, 2.25]', '(2.25, 4.5]', '(4.5, 6.75]', '(6.75, 9]']
self.assertTrue((cats.categories == ex_levels).all())
def test_qcut_nas(self):
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
self.assertTrue(com.isnull(result[:20]).all())
def test_label_formatting(self):
self.assertEqual(tmod._trim_zeros('1.000'), '1')
# it works
result = cut(np.arange(11.), 2)
result = cut(np.arange(11.) / 1e10, 2)
# #1979, negative numbers
result = tmod._format_label(-117.9998, precision=3)
self.assertEqual(result, '-118')
result = tmod._format_label(117.9998, precision=3)
self.assertEqual(result, '118')
def test_qcut_binning_issues(self):
# #1978, 1979
path = os.path.join(tm.get_data_path(), 'cut_data.csv')
arr = np.loadtxt(path)
result = qcut(arr, 20)
starts = []
ends = []
for lev in result.categories:
s, e = lev[1:-1].split(',')
self.assertTrue(s != e)
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
zip(ends[:-1], ends[1:])):
self.assertTrue(sp < sn)
self.assertTrue(ep < en)
self.assertTrue(ep <= sn)
def test_cut_return_categorical(self):
from pandas import Categorical
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = cut(s, 3)
exp = Series(Categorical.from_codes([0, 0, 0, 1, 1, 1, 2, 2, 2],
["(-0.008, 2.667]",
"(2.667, 5.333]", "(5.333, 8]"],
ordered=True))
tm.assert_series_equal(res, exp)
def test_qcut_return_categorical(self):
from pandas import Categorical
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(s, [0, 0.333, 0.666, 1])
exp = Series(Categorical.from_codes([0, 0, 0, 1, 1, 1, 2, 2, 2],
["[0, 2.664]",
"(2.664, 5.328]", "(5.328, 8]"],
ordered=True))
tm.assert_series_equal(res, exp)
def test_series_retbins(self):
# GH 8589
s = Series(np.arange(4))
result, bins = cut(s, 2, retbins=True)
tm.assert_numpy_array_equal(result.cat.codes.values,
np.array([0, 0, 1, 1], dtype=np.int8))
tm.assert_numpy_array_equal(bins, np.array([-0.003, 1.5, 3]))
result, bins = qcut(s, 2, retbins=True)
tm.assert_numpy_array_equal(result.cat.codes.values,
np.array([0, 0, 1, 1], dtype=np.int8))
tm.assert_numpy_array_equal(bins, np.array([0, 1.5, 3]))
def test_single_bin(self):
# issue 14652
expected = Series([0, 0])
s = Series([9., 9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
thientu/scikit-learn
|
sklearn/datasets/__init__.py
|
176
|
3671
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
|
bsd-3-clause
|
MohammedWasim/scikit-learn
|
sklearn/tree/tests/test_export.py
|
130
|
9950
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
|
bsd-3-clause
|
ejm553/NUREU17
|
LSST/SuperNovaLightCurves/process_SN.py
|
3
|
2912
|
#import necessary python libraries
import os
import json
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit, minimize
import pandas as pd
import math
from JSON_to_DF import JSON_to_DataFrame
from Lightcurve_class import *
import celerite
import pickle
#Create Kernels for Gaussian Process
#Real term parameter initialization
a = 1e-5
c = 1
#Matern term parameter initialization
sig = 1e-5
rho = 100
#Bounds on parameters (one dict per kernel, so the first is not overwritten)
bounds_real = dict(log_a = (-15,15), log_c = (-15,15))
bounds_matern = dict(log_sigma = (-15, 15), log_rho = (-15, 15))
#Create Kernels
Real_Kernel = celerite.terms.RealTerm(log_a = np.log(a), log_c = np.log(c), bounds=bounds_real)
Matern_Kernel = celerite.terms.Matern32Term(log_sigma = np.log(sig), log_rho = np.log(rho), bounds=bounds_matern)
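# Hedged illustration (not part of the original script): one way a celerite
# kernel like those above is typically used -- wrap it in a GP, factorize the
# covariance for a set of observation times/errors, and evaluate the
# log-likelihood of the fluxes. The helper name and arguments are placeholders.
def _example_gp_loglike(times, fluxes, flux_errs, kernel=Matern_Kernel):
    gp = celerite.GP(kernel, mean=np.mean(fluxes))
    gp.compute(times, flux_errs)        # factorize the covariance matrix
    return gp.log_likelihood(fluxes)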
#Create lists to store Rchi2 and median values for given fit
Poly4_median = []
Poly4_Rchi2 = []
Poly6_median = []
Poly6_Rchi2 = []
Poly8_median = []
Poly8_Rchi2 = []
GP_Real_median = []
GP_Real_Rchi2 = []
GP_Matern_median = []
GP_Matern_Rchi2 = []
Kapernka_median = []
Kapernka_Rchi2 = []
Bazin_median = []
Bazin_Rchi2 = []
#Loop through pickle files gathering Rchi2 and median data for each fit
directory = "../../../OSC_data/pickled_data/"
for roots, dirs, files in os.walk(directory):
for file in files:
SN = deserialize(directory + file)
for key in SN.Lightcurves.keys():
if(SN.Lightcurves[key].n_good_obs < 3):
continue
print(SN.name, key)
SN.Lightcurves[key].polynomial_fit_plot(4, plot=False)
SN.Lightcurves[key].polynomial_fit_plot(6, plot=False)
SN.Lightcurves[key].polynomial_fit_plot(8, plot=False)
SN.Lightcurves[key].Kapernka_fit_plot(plot=False)
SN.Lightcurves[key].Bazin_fit_plot(plot=False)
SN.Lightcurves[key].Gaussian_process(Real_Kernel, plot=False)
SN.Lightcurves[key].Gaussian_process(Matern_Kernel, plot=False)
print("Models fitted")
for fit, value in SN.Lightcurves[key].Rchi2.items():
if(fit == 'poly_4'):
Poly4_Rchi2.append(value)
elif(fit == 'poly_6'):
Poly6_Rchi2.append(value)
elif(fit == 'poly_8'):
Poly8_Rchi2.append(value)
elif(fit == 'GP_1'):
GP_Real_Rchi2.append(value)
elif(fit == 'GP_2'):
GP_Matern_Rchi2.append(value)
elif(fit == 'Kapernka'):
Kapernka_Rchi2.append(value)
elif(fit == 'Bazin'):
Bazin_Rchi2.append(value)
print("Rchi2 loaded")
for fit, value in SN.Lightcurves[key].medians.items():
if(fit == 'poly_4'):
Poly4_median.append(value)
elif(fit == 'poly_6'):
Poly6_median.append(value)
elif(fit == 'poly_8'):
Poly8_median.append(value)
#elif(key == 'GP'):
#GP_Real_median.append(value)
elif(fit == 'GP'):
GP_Matern_median.append(value)
elif(fit == 'Kapernka'):
Kapernka_median.append(value)
elif(fit == 'Bazin'):
Bazin_median.append(value)
print("medians loaded")
print(len(Poly6_median))
print(len(Poly6_Rchi2))
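# Hedged sketch (not in the original script): one way to pull the reduced
# chi-squared lists collected above into a single pandas DataFrame so the fits
# can be compared side by side. Column names are illustrative.
Rchi2_summary = pd.DataFrame({
    'poly_4': pd.Series(Poly4_Rchi2),
    'poly_6': pd.Series(Poly6_Rchi2),
    'poly_8': pd.Series(Poly8_Rchi2),
    'GP_Real': pd.Series(GP_Real_Rchi2),
    'GP_Matern': pd.Series(GP_Matern_Rchi2),
    'Kapernka': pd.Series(Kapernka_Rchi2),
    'Bazin': pd.Series(Bazin_Rchi2),
})
print(Rchi2_summary.describe())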
|
mit
|
flake123p/ProjectH
|
DSP/ThinkDSP/ws/sampling.py
|
1
|
9978
|
"""This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2015 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import thinkdsp
import thinkplot
import numpy as np
import matplotlib.pyplot as plt
PI2 = 2 * np.pi
FORMATS = ['pdf', 'eps']
def plot_beeps():
wave = thinkdsp.read_wave('253887__themusicalnomad__positive-beeps.wav')
wave.normalize()
thinkplot.preplot(3)
# top left
ax1 = plt.subplot2grid((4, 2), (0, 0), rowspan=2)
plt.setp(ax1.get_xticklabels(), visible=False)
wave.plot()
thinkplot.config(title='Input waves', legend=False)
# bottom left
imp_sig = thinkdsp.Impulses([0.01, 0.4, 0.8, 1.2],
amps=[1, 0.5, 0.25, 0.1])
impulses = imp_sig.make_wave(start=0, duration=1.3,
framerate=wave.framerate)
ax2 = plt.subplot2grid((4, 2), (2, 0), rowspan=2, sharex=ax1)
impulses.plot()
thinkplot.config(xlabel='Time (s)')
# center right
convolved = wave.convolve(impulses)
ax3 = plt.subplot2grid((4, 2), (1, 1), rowspan=2)
plt.title('Convolution')
convolved.plot()
thinkplot.config(xlabel='Time (s)')
thinkplot.save(root='sampling1',
formats=FORMATS,
legend=False)
XLIM = [-22050, 22050]
def plot_am():
wave = thinkdsp.read_wave('105977__wcfl10__favorite-station.wav')
wave.unbias()
wave.normalize()
# top
ax1 = thinkplot.preplot(6, rows=4)
spectrum = wave.make_spectrum(full=True)
spectrum.plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
#second
carrier_sig = thinkdsp.CosSignal(freq=10000)
carrier_wave = carrier_sig.make_wave(duration=wave.duration,
framerate=wave.framerate)
modulated = wave * carrier_wave
ax2 = thinkplot.subplot(2, sharey=ax1)
modulated.make_spectrum(full=True).plot(label='modulated')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
# third
demodulated = modulated * carrier_wave
demodulated_spectrum = demodulated.make_spectrum(full=True)
ax3 = thinkplot.subplot(3, sharey=ax1)
demodulated_spectrum.plot(label='demodulated')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
#fourth
ax4 = thinkplot.subplot(4, sharey=ax1)
demodulated_spectrum.low_pass(10000)
demodulated_spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root='sampling2',
formats=FORMATS)
#carrier_spectrum = carrier_wave.make_spectrum(full=True)
#carrier_spectrum.plot()
#convolved = spectrum.convolve(carrier_spectrum)
#convolved.plot()
#reconvolved = convolved.convolve(carrier_spectrum)
#reconvolved.plot()
def sample(wave, factor):
"""Simulates sampling of a wave.
wave: Wave object
    factor: subsampling factor; every factor-th sample is kept and the rest
        are zeroed, so the effective framerate is the original divided by factor
"""
ys = np.zeros(len(wave))
ys[::factor] = wave.ys[::factor]
ts = wave.ts[:]
return thinkdsp.Wave(ys, ts, wave.framerate)
def make_impulses(wave, factor):
ys = np.zeros(len(wave))
ys[::factor] = 1
ts = np.arange(len(wave)) / wave.framerate
return thinkdsp.Wave(ys, ts, wave.framerate)
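# Hedged check (not in the original): sample() and make_impulses() are two views
# of the same operation -- zeroing out all but every `factor`-th sample is the
# same as multiplying the wave by the impulse train. The helper name is
# illustrative only.
def _sampling_equivalence(wave, factor):
    direct = sample(wave, factor)
    via_impulses = wave * make_impulses(wave, factor)
    return np.allclose(direct.ys, via_impulses.ys)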
def plot_segments(original, filtered):
start = 1
duration = 0.01
original.segment(start=start, duration=duration).plot(color='gray')
filtered.segment(start=start, duration=duration).plot()
def plot_sampling(wave, root):
ax1 = thinkplot.preplot(2, rows=2)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax2 = thinkplot.subplot(2)
sampled = sample(wave, 4)
sampled.make_spectrum(full=True).plot(label='sampled')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
def plot_sampling2(wave, root):
ax1 = thinkplot.preplot(6, rows=4)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax2 = thinkplot.subplot(2)
impulses = make_impulses(wave, 4)
impulses.make_spectrum(full=True).plot(label='impulses')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax3 = thinkplot.subplot(3)
sampled = wave * impulses
spectrum = sampled.make_spectrum(full=True)
spectrum.plot(label='sampled')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax4 = thinkplot.subplot(4)
spectrum.low_pass(5512.5)
spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
def plot_sampling3(wave, root):
ax1 = thinkplot.preplot(6, rows=3)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
impulses = make_impulses(wave, 4)
ax2 = thinkplot.subplot(2)
sampled = wave * impulses
spectrum = sampled.make_spectrum(full=True)
spectrum.plot(label='sampled')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax3 = thinkplot.subplot(3)
spectrum.low_pass(5512.5)
spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
#filtered = spectrum.make_wave()
#plot_segments(wave, filtered)
def make_boxcar(spectrum, factor):
"""Makes a boxcar filter for the given spectrum.
spectrum: Spectrum to be filtered
factor: sampling factor
"""
fs = np.copy(spectrum.fs)
hs = np.zeros_like(spectrum.hs)
cutoff = spectrum.framerate / 2 / factor
for i, f in enumerate(fs):
if abs(f) <= cutoff:
hs[i] = 1
return thinkdsp.Spectrum(hs, fs, spectrum.framerate, full=spectrum.full)
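# Hedged usage sketch (not in the original): applying the boxcar filter above to
# a sampled wave by scaling the spectrum components, then transforming back to a
# (reconstructed) wave. The helper name is illustrative only.
def _boxcar_reconstruct(wave, factor):
    sampled = sample(wave, factor)
    spectrum = sampled.make_spectrum(full=True)
    boxcar = make_boxcar(spectrum, factor)
    spectrum.hs *= boxcar.hs          # zero components beyond the cutoff
    return spectrum.make_wave()       # interpolated version of the samples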
def plot_sinc_demo(wave, factor, start=None, duration=None):
def make_sinc(t, i, y):
"""Makes a shifted, scaled copy of the sinc function."""
sinc = boxcar.make_wave()
sinc.shift(t)
sinc.roll(i)
sinc.scale(y * factor)
return sinc
def plot_mini_sincs(wave):
"""Plots sinc functions for each sample in wave."""
t0 = wave.ts[0]
for i in range(0, len(wave), factor):
sinc = make_sinc(t0, i, wave.ys[i])
seg = sinc.segment(start, duration)
seg.plot(color='green', linewidth=0.5, alpha=0.3)
if i == 0:
total = sinc
else:
total += sinc
seg = total.segment(start, duration)
seg.plot(color='blue', alpha=0.5)
sampled = sample(wave, factor)
spectrum = sampled.make_spectrum()
boxcar = make_boxcar(spectrum, factor)
start = wave.start if start is None else start
duration = wave.duration if duration is None else duration
sampled.segment(start, duration).plot_vlines(color='gray')
wave.segment(start, duration).plot(color='gray')
plot_mini_sincs(wave)
def plot_sincs(wave):
start = 1.0
duration = 0.01
factor = 4
short = wave.segment(start=start, duration=duration)
#short.plot()
sampled = sample(short, factor)
#sampled.plot_vlines(color='gray')
spectrum = sampled.make_spectrum(full=True)
boxcar = make_boxcar(spectrum, factor)
sinc = boxcar.make_wave()
sinc.shift(sampled.ts[0])
sinc.roll(len(sinc)//2)
thinkplot.preplot(2, cols=2)
sinc.plot()
thinkplot.config(xlabel='Time (s)')
thinkplot.subplot(2)
boxcar.plot()
thinkplot.config(xlabel='Frequency (Hz)',
ylim=[0, 1.05],
xlim=[-boxcar.max_freq, boxcar.max_freq])
thinkplot.save(root='sampling6',
formats=FORMATS)
return
# CAUTION: don't call plot_sinc_demo with a large wave or it will
# fill memory and crash
plot_sinc_demo(short, 4)
thinkplot.config(xlabel='Time (s)')
thinkplot.save(root='sampling7',
formats=FORMATS)
start = short.start + 0.004
duration = 0.00061
plot_sinc_demo(short, 4, start, duration)
thinkplot.config(xlabel='Time (s)',
xlim=[start, start+duration],
ylim=[-0.06, 0.17], legend=False)
thinkplot.save(root='sampling8',
formats=FORMATS)
def kill_yticklabels():
axis = plt.gca()
plt.setp(axis.get_yticklabels(), visible=False)
def show_impulses(wave, factor, i):
thinkplot.subplot(i)
thinkplot.preplot(2)
impulses = make_impulses(wave, factor)
impulses.segment(0, 0.001).plot_vlines(linewidth=2)
if i == 1:
thinkplot.config(title='Impulse train',
ylim=[0, 1.05])
else:
thinkplot.config(xlabel='Time (s)',
ylim=[0, 1.05])
thinkplot.subplot(i+1)
impulses.make_spectrum(full=True).plot()
kill_yticklabels()
if i == 1:
thinkplot.config(title='DFT of impulse train',
xlim=[-22400, 22400])
else:
thinkplot.config(xlabel='Frequency (Hz)',
xlim=[-22400, 22400])
def plot_impulses(wave):
thinkplot.preplot(rows=2, cols=2)
show_impulses(wave, 4, 1)
show_impulses(wave, 8, 3)
thinkplot.save('sampling9',
formats=FORMATS)
def main():
wave = thinkdsp.read_wave('328878__tzurkan__guitar-phrase-tzu.wav')
wave.normalize()
plot_sampling3(wave, 'sampling5')
plot_sincs(wave)
plot_beeps()
plot_am()
wave = thinkdsp.read_wave('263868__kevcio__amen-break-a-160-bpm.wav')
wave.normalize()
plot_impulses(wave)
plot_sampling(wave, 'sampling3')
plot_sampling2(wave, 'sampling4')
if __name__ == '__main__':
main()
|
gpl-3.0
|
with-git/tensorflow
|
tensorflow/contrib/timeseries/examples/lstm.py
|
17
|
9460
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=
lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _transform(self, data):
"""Normalize data based on input statistics to encourage stable training."""
mean, variance = self._input_statistics.overall_feature_moments
return (data - mean) / variance
def _de_transform(self, data):
"""Transform data back to the input scale."""
mean, variance = self._input_statistics.overall_feature_moments
return data * variance + mean
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
transformed_values = self._transform(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, lstm_state = state
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=previous_observation_or_prediction, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction, new_lstm_state)
return new_state_tuple, {"mean": self._de_transform(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update model state based on exogenous regressors."""
raise NotImplementedError(
"Exogenous inputs are not implemented for this example.")
def train_and_predict(csv_file_name=_DATA_FILE, training_steps=200):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(num_features=5, num_units=128),
optimizer=tf.train.AdamOptimizer(0.001))
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
return times, observed, all_times, predicted_mean
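# Hedged illustration (not part of the original example): a rough in-sample
# error computed from the quantities returned by train_and_predict(). The
# helper name and the reduced step count are placeholders for a quick check,
# not a recommended configuration.
def _approximate_in_sample_mse(training_steps=10):
  times, observed, _, predicted = train_and_predict(training_steps=training_steps)
  return numpy.mean((observed - predicted[:len(times)]) ** 2)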
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
|
apache-2.0
|
yavalvas/yav_com
|
build/matplotlib/lib/mpl_examples/axes_grid/demo_curvelinear_grid2.py
|
15
|
1839
|
import numpy as np
#from matplotlib.path import Path
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axes_grid.axislines import Subplot
import mpl_toolkits.axes_grid.angle_helper as angle_helper
def curvelinear_test1(fig):
"""
grid for custom transform.
"""
def tr(x, y):
sgn = np.sign(x)
x, y = np.abs(np.asarray(x)), np.asarray(y)
return sgn*x**.5, y
def inv_tr(x,y):
sgn = np.sign(x)
x, y = np.asarray(x), np.asarray(y)
return sgn*x**2, y
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = None,
lat_cycle = None,
lon_minmax = None, #(0, np.inf),
lat_minmax = None,
)
grid_helper = GridHelperCurveLinear((tr, inv_tr),
extreme_finder=extreme_finder)
ax1 = Subplot(fig, 111, grid_helper=grid_helper)
# ax1 will have a ticks and gridlines defined by the given
# transform (+ transData of the Axes). Note that the transform of
# the Axes itself (i.e., transData) is not affected by the given
# transform.
fig.add_subplot(ax1)
ax1.imshow(np.arange(25).reshape(5,5),
vmax = 50, cmap=plt.cm.gray_r,
interpolation="nearest",
origin="lower")
# tick density
grid_helper.grid_finder.grid_locator1._nbins = 6
grid_helper.grid_finder.grid_locator2._nbins = 6
if 1:
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
curvelinear_test1(fig)
plt.show()
|
mit
|
Dapid/scipy
|
scipy/stats/morestats.py
|
5
|
78562
|
# Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, angle, count_nonzero)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
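# Hedged sanity check (not part of the SciPy source): the docstring above states
# that `bayes_mvs` is equivalent to querying the `mvsdist` distributions
# directly; this helper (name illustrative) returns both forms for comparison.
def _bayes_mvs_equivalence(data, alpha=0.9):
    direct = bayes_mvs(data, alpha)
    via_dists = tuple((d.mean(), d.interval(alpha)) for d in mvsdist(data))
    return direct, via_dists     # the two tuples should agree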
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
    The return values from bayes_mvs(data) are equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
        ln phi(t) = sum_{n=0}^{inf} kappa_n * (it)**n / n!
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n + 1, 'd')
data = ravel(data)
N = len(data)
for k in range(1, n + 1):
S[k] = sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
        val = 1 - 0.5**(1/n),               for i = 1
        val = (i - 0.3175) / (n + 0.365),   for i = 2, ..., n-1
        val = 0.5**(1/n),                   for i = n
        quantiles = dist.ppf(val)
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
    ...                    size=(nsample//2, 2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Probability Plot')
plot.set_xlabel('Quantiles')
plot.set_ylabel('Ordered Values')
else:
# matplotlib.pyplot module
plot.title('Probability Plot')
plot.xlabel('Quantiles')
plot.ylabel('Ordered Values')
        except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
correlation coefficient for the given data to a one-parameter
family of distributions.
See also ppcc_plot
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""Returns (shape, ppcc), and optionally plots shape vs. ppcc
(probability plot correlation coefficient) as a function of shape
parameter for a one-parameter family of distributions from shape
value a to b.
See also ppcc_max
"""
svals = r_[a:b:complex(N)]
ppcc = svals * 0.0
k = 0
for sval in svals:
r1, r2 = probplot(x, sval, dist=dist, fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
plot.plot(svals, ppcc, 'x')
plot.title('(%s) PPCC Plot' % dist)
        plot.xlabel('Shape Values')
        plot.ylabel('Prob Plot Corr. Coef.')
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=np.float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda > 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
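# Hedged sketch (not part of the SciPy source): inverting the Box-Cox transform
# by hand, following the formula quoted in the docstring above. The helper name
# is illustrative only.
def _invert_boxcox(y, lmbda):
    if lmbda == 0:
        return np.exp(y)
    return (y * lmbda + 1.0) ** (1.0 / lmbda)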
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=np.float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlelation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Box-Cox Normality Plot')
plot.set_ylabel('Prob Plot Corr. Coef.')
                plot.set_xlabel(r'$\lambda$')
else:
# matplotlib.pyplot module
plot.title('Box-Cox Normality Plot')
plot.ylabel('Prob Plot Corr. Coef.')
                plot.xlabel(r'$\lambda$')
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of he American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
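    Examples
    --------
    An illustrative sketch (the sample below is arbitrary; only the critical
    values are fixed for a given sample size):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(loc=0, scale=1, size=200)
    >>> A2, crit, sig = stats.anderson(x, dist='norm')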
"""
if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
z = distributions.norm.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2), axis=0) - 0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
z = distributions.logistic.cdf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
z = distributions.gumbel_l.cdf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
return A2, critical, sig
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
        Mij = s_ssorted_right.astype(np.float64)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
p : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
h = (1. / arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return A2, critical, p
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
AB : float
The Ansari-Bradley test statistic
p-value : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
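    Examples
    --------
    A hedged sketch comparing two samples drawn with different scales (the
    seed and sample sizes are arbitrary):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = np.random.normal(loc=0, scale=1, size=40)
    >>> y = np.random.normal(loc=0, scale=3, size=45)
    >>> AB, p = stats.ansari(x, y)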
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * sum(a1[find:], axis=0) / total
else:
pval = 2.0 * sum(a1[find+1:], axis=0) / total
return AB, min(1.0, pval)
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
T : float
The test statistic.
p-value : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
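    Examples
    --------
    A minimal sketch with three samples, one of which has a larger spread
    (seed and sample sizes are arbitrary):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(0)
    >>> a = np.random.normal(0, 1.0, size=50)
    >>> b = np.random.normal(0, 1.0, size=60)
    >>> c = np.random.normal(0, 2.5, size=45)
    >>> T, p = stats.bartlett(a, b, c)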
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni, axis=0)
spsq = sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return T, pval
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
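    Examples
    --------
    A minimal sketch using the default 'median' centering (seed and sample
    sizes are arbitrary):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(0)
    >>> a = np.random.normal(0, 1.0, size=50)
    >>> b = np.random.normal(0, 1.0, size=60)
    >>> c = np.random.normal(0, 2.5, size=45)
    >>> W, p = stats.levene(a, b, c, center='median')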
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return W, pval
@setastest(False)
def binom_test(x, n=None, p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
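    Examples
    --------
    A small sketch: test whether 7 successes out of 20 trials are consistent
    with a hypothesized success probability of 0.3 (numbers are illustrative):
    >>> from scipy import stats
    >>> pval = stats.binom_test(7, n=20, p=0.3)
    >>> pval2 = stats.binom_test([7, 13], p=0.3)  # same data as (successes, failures)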
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args, **kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
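    Examples
    --------
    A hedged sketch with three samples of increasing spread (seed and sample
    sizes are arbitrary):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(2)
    >>> a = np.random.normal(0, 1.0, size=50)
    >>> b = np.random.normal(0, 2.0, size=55)
    >>> c = np.random.normal(0, 3.0, size=60)
    >>> Xsq, p = stats.fligner(a, b, c)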
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return Xsq, pval
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
T : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
p-value : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
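    Examples
    --------
    A minimal paired-sample sketch (the 'before'/'after' data are synthetic
    and the exact statistic and p-value depend on the draw):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(0)
    >>> before = np.random.normal(10.0, 2.0, size=30)
    >>> after = before + np.random.normal(0.5, 1.0, size=30)
    >>> T, p = stats.wilcoxon(before, after)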
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return T, prob
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
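# For illustration, _hermnorm(3) is expected to return polynomials equivalent
# to [1, -x, x**2 - 1], following the recursion above.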
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
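    Examples
    --------
    A small sketch with two angles placed symmetrically about zero; the
    circular mean is expected to be near 0 (mod ``2*pi``) rather than near pi:
    >>> import numpy as np
    >>> from scipy.stats import circmean
    >>> m = circmean(np.array([0.2, 2*np.pi - 0.2]))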
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j * ang), axis=axis))
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
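    Examples
    --------
    A hedged sketch: for tightly clustered angles the circular standard
    deviation is expected to be close to the ordinary standard deviation:
    >>> import numpy as np
    >>> from scipy.stats import circstd
    >>> np.random.seed(0)
    >>> ang = np.random.normal(loc=1.0, scale=0.05, size=1000)
    >>> s_circ = circstd(ang)
    >>> s_lin = np.std(ang)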
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
|
bsd-3-clause
|
abhishekgahlot/scikit-learn
|
examples/text/mlcomp_sparse_document_classification.py
|
292
|
4498
|
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
|
bsd-3-clause
|
kevin-coder/tensorflow-fork
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
|
39
|
32726
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
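# A hedged illustration of _get_in_out_shape: for x_shape=(100, 3),
# y_shape=(100,), n_classes=2 and batch_size=32, the expected result is
# input_shape == [32, 3], output_shape == [32, 2] and batch_size == 32;
# the label shape gains a trailing class dimension for one-hot targets.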
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
    n_classes: number of classes. Must be None or same type as y. If `y` is a
      `dict` (or an iterable which returns dicts), then `n_classes` must be a
      `dict` such that `n_classes[key]` gives the number of classes for
      `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
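  Example:
    A minimal sketch (illustrative only; this deprecated module predates
    `tf.data`, and the shapes, dtypes and batch size below are arbitrary):
      import numpy as np
      x = np.random.rand(100, 3).astype(np.float32)
      y = np.random.randint(0, 2, size=100)
      feeder = setup_train_data_feeder(x, y, n_classes=2, batch_size=32)
      input_ph, output_ph = feeder.input_builder()
      feed_dict = feeder.get_feed_dict_fn()()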
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample, which can either be an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(
x, dict), y is not None and isinstance(y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (dict(
[(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (_check_dtype(self._y.dtype)
if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
if len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(
self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else {
self._input_placeholder.name:
extract(self._x, batch_indices)
})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = (self.output_shape, self._output_dtype,
self.n_classes)
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
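# Hedged sketch (not part of the original module): the intended calling sequence
# around a DataFeeder, using only the methods defined above. Constructing the
# feeder itself and running a session are assumed to happen elsewhere.
#
#   feeder = ...  # a DataFeeder built from numpy/pandas data
#   inp, out = feeder.input_builder()        # create input/output placeholders
#   feeder.set_placeholders(inp, out)        # or reuse externally created ones
#   feed_fn = feeder.get_feed_dict_fn()
#   while True:
#       try:
#           feed_dict = feed_fn()            # one mini-batch per call
#       except StopIteration:                # raised once max_epochs is reached
#           break
#       # session.run(train_op, feed_dict=feed_dict)
#   print(feeder.get_feed_params())          # {'epoch': ..., 'offset': ..., 'batch_size': ...}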
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Streaming data feeder allows reading data as it arrives from disk or
somewhere else. It is common to have these iterators rotate infinitely over
the dataset, so the trainer side can control how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator, each element of which is one feature sample. A sample can
be an Nd numpy array or a dictionary of Nd numpy arrays.
y: iterator, each element of which is one label sample. A sample can be
an Nd numpy array or a dictionary of Nd numpy arrays holding one or many
class / regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = (
[1] + list(y_first_el[0].shape
if isinstance(y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmax and regression requirements.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict(
[(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
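# Hedged sketch (not part of the original module): StreamingDataFeeder consumes
# plain Python generators; the x and y generators below are hypothetical.
#
#   def x_gen():
#       while True:
#           yield np.random.rand(10)          # one feature sample per iteration
#   def y_gen():
#       while True:
#           yield np.random.randint(0, 3)     # one label sample per iteration
#   feeder = StreamingDataFeeder(x_gen(), y_gen(), n_classes=3, batch_size=32)
#   feeder.input_builder()                    # inherited from DataFeeder
#   feed_dict = feeder.get_feed_dict_fn()()   # accumulates 32 samples per call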
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Numpy arrays can be serialized to disk, and it is possible to do random seeks
into them. DaskDataFeeder removes the requirement to have the full dataset in
memory and still allows random seeks for sampling of batches.
"""
@deprecated(None, 'Please feed input to tf.data to support dask.')
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns one or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapping default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.placeholder for input features mini batch.
output_placeholder: tf.placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
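# Hedged sketch (not part of the original module): DaskDataFeeder expects dask
# DataFrames/Series and is handed its placeholders explicitly; the dask objects
# and placeholders below are hypothetical.
#
#   import dask.dataframe as dd
#   x = dd.from_pandas(features_df, npartitions=4)
#   y = dd.from_pandas(labels_df, npartitions=4)
#   feeder = DaskDataFeeder(x, y, n_classes=2, batch_size=64)
#   feed_fn = feeder.get_feed_dict_fn(input_placeholder, output_placeholder)
#   feed_dict = feed_fn()   # one random-split mini-batch from the combined frame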
| apache-2.0 | jmmease/pandas | pandas/core/strings.py | 2 | 59889 |
import numpy as np
from pandas.compat import zip
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_object_dtype,
is_string_like,
is_list_like,
is_scalar,
is_integer,
is_re)
from pandas.core.common import _values_from_object
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.core.base import NoNewAttributesMixin
from pandas.util._decorators import Appender
import re
import pandas._libs.lib as lib
import warnings
import textwrap
import codecs
_cpython_optimized_encoders = (
"utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii"
)
_cpython_optimized_decoders = _cpython_optimized_encoders + (
"utf-16", "utf-32"
)
_shared_docs = dict()
def _get_array_list(arr, others):
from pandas.core.series import Series
if len(others) and isinstance(_values_from_object(others)[0],
(list, np.ndarray, Series)):
arrays = [arr] + list(others)
else:
arrays = [arr, others]
return [np.asarray(x, dtype=object) for x in arrays]
def str_cat(arr, others=None, sep=None, na_rep=None):
"""
Concatenate strings in the Series/Index with given separator.
Parameters
----------
others : list-like, or list of list-likes
If None, returns a str concatenating the strings of the Series
sep : string or None, default None
na_rep : string or None, default None
If None, NA values in the Series are ignored.
Returns
-------
concat : Series/Index of objects or str
Examples
--------
When ``na_rep`` is `None` (default behavior), NaN value(s)
in the Series are ignored.
>>> Series(['a','b',np.nan,'c']).str.cat(sep=' ')
'a b c'
>>> Series(['a','b',np.nan,'c']).str.cat(sep=' ', na_rep='?')
'a b ? c'
If ``others`` is specified, corresponding values are
concatenated with the separator. Result will be a Series of strings.
>>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')
0 a,A
1 b,B
2 c,C
dtype: object
Otherwise, strings in the Series are concatenated. Result will be a string.
>>> Series(['a', 'b', 'c']).str.cat(sep=',')
'a,b,c'
Also, you can pass a list of list-likes.
>>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')
0 a,x,1
1 b,y,2
dtype: object
"""
if sep is None:
sep = ''
if others is not None:
arrays = _get_array_list(arr, others)
n = _length_check(arrays)
masks = np.array([isna(x) for x in arrays])
cats = None
if na_rep is None:
na_mask = np.logical_or.reduce(masks, axis=0)
result = np.empty(n, dtype=object)
np.putmask(result, na_mask, np.nan)
notmask = ~na_mask
tuples = zip(*[x[notmask] for x in arrays])
cats = [sep.join(tup) for tup in tuples]
result[notmask] = cats
else:
for i, x in enumerate(arrays):
x = np.where(masks[i], na_rep, x)
if cats is None:
cats = x
else:
cats = cats + sep + x
result = cats
return result
else:
arr = np.asarray(arr, dtype=object)
mask = isna(arr)
if na_rep is None and mask.any():
if sep == '':
na_rep = ''
else:
return sep.join(arr[notna(arr)])
return sep.join(np.where(mask, na_rep, arr))
def _length_check(others):
n = None
for x in others:
try:
if n is None:
n = len(x)
elif len(x) != n:
raise ValueError('All arrays must be same length')
except TypeError:
raise ValueError("Did you mean to supply a `sep` keyword?")
return n
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isna(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
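# Illustrative sketch (not part of the original module): how _na_map applies a
# plain Python callable element-wise while leaving missing values untouched.
# The sample data is hypothetical; numpy is already imported as np above.
def _example_na_map():
    arr = np.array(['ab', np.nan, 'abcd'], dtype=object)
    # len() runs only on the non-NA entries; the NaN stays NaN.
    return _na_map(len, arr)  # roughly [2, nan, 4]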
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
Parameters
----------
pat : string, valid regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
counts : Series/Index of integer values
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
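# Illustrative sketch (hypothetical data): str_count backs Series.str.count and
# counts non-overlapping regex matches per element, propagating NaN.
def _example_str_count():
    arr = np.array(['banana', 'apple', np.nan], dtype=object)
    return str_count(arr, 'a')  # roughly [3, 1, nan]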
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Return boolean Series/``array`` indicating whether the given pattern/regex is
contained in each string of the Series/Index.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : default NaN, fill value for missing values.
regex : bool, default True
If True use re.search, otherwise use Python in operator
Returns
-------
contained : Series/array of boolean values
See Also
--------
match : analogous, but stricter, relying on re.match instead of re.search
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
" groups, use str.extract.", UserWarning,
stacklevel=3)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
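# Illustrative sketch (hypothetical data): the two code paths of str_contains,
# regex search versus the plain `in` operator, both NaN-preserving.
def _example_str_contains():
    arr = np.array(['Mouse', 'house', np.nan], dtype=object)
    regex_hits = str_contains(arr, 'ous', case=False, regex=True)  # roughly [True, True, nan]
    literal_hits = str_contains(arr, 'Mo', regex=False)            # roughly [True, False, nan]
    return regex_hits, literal_hits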
def str_startswith(arr, pat, na=np.nan):
"""
Return boolean Series/``array`` indicating whether each string in the
Series/Index starts with passed pattern. Equivalent to
:meth:`str.startswith`.
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
startswith : Series/array of boolean values
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Return boolean Series indicating whether each string in the
Series/Index ends with passed pattern. Equivalent to
:meth:`str.endswith`.
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
endswith : Series/array of boolean values
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0):
"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : string or compiled regex
String can be a character sequence or regular expression.
.. versionadded:: 0.20.0
`pat` also accepts a compiled regex.
repl : string or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
.. versionadded:: 0.20.0
`repl` also accepts a callable.
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
Returns
-------
replaced : Series/Index of objects
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case` or `flags` with a compiled regex will
raise an error.
Examples
--------
When `repl` is a string, every `pat` is replaced as with
:meth:`str.replace`. NaN value(s) in the Series are left as is.
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', 'b')
0 boo
1 buz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (is_string_like(repl) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError("case and flags cannot be set"
" when pat is a compiled regex")
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
use_re = is_compiled_re or len(pat) > 1 or flags or callable(repl)
if use_re:
n = n if n >= 0 else 0
regex = re.compile(pat, flags=flags)
f = lambda x: regex.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series/Index by indicated number
of times.
Parameters
----------
repeats : int or array
Same value for all (int) or different value per (array)
Returns
-------
repeated : Series/Index of objects
"""
if is_scalar(repeats):
def rep(x):
try:
return compat.binary_type.__mul__(x, repeats)
except TypeError:
return compat.text_type.__mul__(x, repeats)
return _na_map(rep, arr)
else:
def rep(x, r):
try:
return compat.binary_type.__mul__(x, r)
except TypeError:
return compat.text_type.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = lib.vec_binop(_values_from_object(arr), repeats, rep)
return result
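# Illustrative sketch (hypothetical data): str_repeat accepts either a single
# repeat count for every element or one count per element.
def _example_str_repeat():
    arr = np.array(['a', 'b'], dtype=object)
    same = str_repeat(arr, 2)              # roughly ['aa', 'bb']
    per_element = str_repeat(arr, [1, 3])  # roughly ['a', 'bbb']
    return same, per_element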
def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=None):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : default NaN, fill value for missing values.
as_indexer : DEPRECATED - Keyword is ignored.
Returns
-------
Series/array of boolean values
See Also
--------
contains : analogous, but less strict, relying on re.search instead of
re.match
extract : extract matched groups
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if (as_indexer is False) and (regex.groups > 0):
raise ValueError("as_indexer=False with a pattern with groups is no "
"longer supported. Use '.str.extract(pat)' instead")
elif as_indexer is not None:
# Previously, this keyword was used for changing the default but
# deprecated behaviour. This keyword is now no longer needed.
warnings.warn("'as_indexer' keyword was specified but is ignored "
"(match now returns a boolean indexer by default), "
"and will be removed in a future version.",
FutureWarning, stacklevel=3)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
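# Illustrative sketch (hypothetical data): str_match anchors the pattern at the
# start of each string (re.match), unlike str_contains which uses re.search.
def _example_str_match():
    arr = np.array(['ab123', '123ab', np.nan], dtype=object)
    return str_match(arr, r'[a-z]+\d')  # roughly [True, False, nan]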
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, compat.string_types):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression. This function is called from
str_extract(expand=False), and can return Series, DataFrame, or
Index.
"""
from pandas import DataFrame, Index
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
if regex.groups == 1:
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, Index):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object)
return result, name
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=object)
def str_extract(arr, pat, flags=0, expand=None):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat.
Parameters
----------
pat : string
Regular expression pattern with capturing groups
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
expand : bool, default False
* If True, return DataFrame.
* If False, return Series/Index/DataFrame.
.. versionadded:: 0.18.0
Returns
-------
DataFrame with one row for each subject string, and one column for
each group. Any capture group names in regular expression pat will
be used for column names; otherwise capture group numbers will be
used. The dtype of each result column is always object, even when
no match is found. If expand=False and pat has only one capture group,
then return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : returns all matches (not just the first match)
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = Series(['a1', 'b2', 'c3'])
>>> s.str.extract('([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract('([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract('(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract('[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract('[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if expand is None:
warnings.warn(
"currently extract(expand=None) " +
"means expand=False (return Index/Series/DataFrame) " +
"but in a future version of pandas this will be changed " +
"to expand=True (return DataFrame)",
FutureWarning,
stacklevel=3)
expand = False
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._data, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : string
Regular expression pattern with capturing groups
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
A DataFrame with one row for each match, and one column for each
group. Its rows have a MultiIndex with first levels that come from
the subject Series. The last level is named 'match' and indicates
the order in the subject. Any capture group names in regular
expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : returns first match only (not all matches)
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall("[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall("[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall("(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall("(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndex):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.iteritems():
if isinstance(subject, compat.string_types):
if not is_mi:
subject_key = (subject_key, )
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, compat.string_types):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group
for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i, ))
index_list.append(result_key)
if 0 < len(index_list):
from pandas import MultiIndex
index = MultiIndex.from_tuples(
index_list, names=arr.index.names + ["match"])
else:
index = None
result = arr._constructor_expanddim(match_list, index=index,
columns=columns)
return result
def str_get_dummies(arr, sep='|'):
"""
Split each string in the Series by sep and return a frame of
dummy/indicator variables.
Parameters
----------
sep : string, default "|"
String to split on.
Returns
-------
dummies : DataFrame
Examples
--------
>>> Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
See Also
--------
pandas.get_dummies
"""
arr = arr.fillna('')
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - set([""]))
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
return dummies, tags
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with
passed delimiter. Equivalent to :meth:`str.join`.
Parameters
----------
sep : string
Delimiter
Returns
-------
joined : Series/Index of objects
"""
return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the
Series/Index. Equivalent to :func:`re.findall`.
Parameters
----------
pat : string
Pattern or regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
matches : Series/Index of lists
See Also
--------
extractall : returns DataFrame with one column per capture group
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
"""
Return indexes in each string of the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``
Returns
-------
found : Series/Index of integer values
"""
if not isinstance(sub, compat.string_types):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__))
if side == 'left':
method = 'find'
elif side == 'right':
method = 'rfind'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
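# Illustrative sketch (hypothetical data): str_find from the left versus the
# right; -1 signals that the substring was not found.
def _example_str_find():
    arr = np.array(['abcabc', 'xyz', np.nan], dtype=object)
    left = str_find(arr, 'bc')                 # roughly [1, -1, nan]
    right = str_find(arr, 'bc', side='right')  # roughly [4, -1, nan]
    return left, right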
def str_index(arr, sub, start=0, end=None, side='left'):
if not isinstance(sub, compat.string_types):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__))
if side == 'left':
method = 'index'
elif side == 'right':
method = 'rindex'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
"""
Pad strings in the Series/Index with an additional character to
specified side.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with spaces
side : {'left', 'right', 'both'}, default 'left'
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
padded : Series/Index of objects
"""
if not isinstance(fillchar, compat.string_types):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if not is_integer(width):
msg = 'width must be of integer type, not {0}'
raise TypeError(msg.format(type(width).__name__))
if side == 'left':
f = lambda x: x.rjust(width, fillchar)
elif side == 'right':
f = lambda x: x.ljust(width, fillchar)
elif side == 'both':
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr)
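# Illustrative sketch (hypothetical data): left-padding with a custom fill
# character via str_pad (side='left' maps to str.rjust).
def _example_str_pad():
    arr = np.array(['7', '42'], dtype=object)
    return str_pad(arr, 4, side='left', fillchar='0')  # roughly ['0007', '0042']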
def str_split(arr, pat=None, n=None):
"""
Split each string (a la re.split) in the Series/Index by given
pattern, propagating NA values. Equivalent to :meth:`str.split`.
Parameters
----------
pat : string, default None
String or regular expression to split on. If None, splits on whitespace
n : int, default -1 (all)
None, 0 and -1 will be interpreted as return all splits
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
return_type : deprecated, use `expand`
Returns
-------
split : Series/Index or DataFrame/MultiIndex of objects
"""
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
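# Illustrative sketch (hypothetical data): str_split with and without a limit on
# the number of splits; a single-character pattern uses plain str.split.
def _example_str_split():
    arr = np.array(['a_b_c', 'd_e', np.nan], dtype=object)
    whole = str_split(arr, '_')         # roughly [['a', 'b', 'c'], ['d', 'e'], nan]
    limited = str_split(arr, '_', n=1)  # roughly [['a', 'b_c'], ['d', 'e'], nan]
    return whole, limited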
def str_rsplit(arr, pat=None, n=None):
"""
Split each string in the Series/Index by the given delimiter
string, starting at the end of the string and working to the front.
Equivalent to :meth:`str.rsplit`.
Parameters
----------
pat : string, default None
Separator to split on. If None, splits on whitespace
n : int, default -1 (all)
None, 0 and -1 will be interpreted as return all splits
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
Returns
-------
split : Series/Index or DataFrame/MultiIndex of objects
"""
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series/Index
Parameters
----------
start : int or None
stop : int or None
step : int or None
Returns
-------
sliced : Series/Index of objects
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a slice of each string in the Series/Index with another
string.
Parameters
----------
start : int or None
stop : int or None
repl : str or None
String for replacement
Returns
-------
replaced : Series/Index of objects
"""
if repl is None:
repl = ''
def f(x):
if x[start:stop] == '':
local_stop = start
else:
local_stop = stop
y = ''
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
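# Illustrative sketch (hypothetical data): when the selected slice is empty (as
# for 'ab' below), str_slice_replace inserts `repl` instead of overwriting.
def _example_str_slice_replace():
    arr = np.array(['abcdef', 'ab'], dtype=object)
    return str_slice_replace(arr, start=3, stop=5, repl='X')  # roughly ['abcXf', 'abX']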
def str_strip(arr, to_strip=None, side='both'):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
stripped : Series/Index of objects
"""
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr)
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True)
replace_whitespace : bool, optional
If true, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True)
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True)
break_long_words : bool, optional
If true, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width. (default: True)
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words. (default: True)
Returns
-------
wrapped : Series/Index of objects
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
"""
kwargs['width'] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr)
def str_translate(arr, table, deletechars=None):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`. Note that the optional
argument deletechars is only valid if you are using python 2. For python 3,
character deletion should be specified via the table argument.
Parameters
----------
table : dict (python 3), str or None (python 2)
In python 3, table is a mapping of Unicode ordinals to Unicode
ordinals, strings, or None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
In python 2, table is either a string of length 256 or None. If the
table argument is None, no translation is applied and the operation
simply removes the characters in deletechars. :func:`string.maketrans`
is a helper function for making translation tables.
deletechars : str, optional (python 2)
A string of characters to delete. This argument is only valid
in python 2.
Returns
-------
translated : Series/Index of objects
"""
if deletechars is None:
f = lambda x: x.translate(table)
else:
from pandas import compat
if compat.PY3:
raise ValueError("deletechars is not a valid argument for "
"str.translate in python 3. You should simply "
"specify character deletions in the table "
"argument")
f = lambda x: x.translate(table, deletechars)
return _na_map(f, arr)
def str_get(arr, i):
"""
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Integer index (location)
Returns
-------
items : Series/Index of objects
"""
f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan
return _na_map(f, arr)
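# Illustrative sketch (hypothetical data): str_get returns NaN for out-of-bounds
# positions instead of raising IndexError.
def _example_str_get():
    arr = np.array(['abc', 'a', np.nan], dtype=object)
    return str_get(arr, 1)  # roughly ['b', nan, nan]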
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
decoded : Series/Index of objects
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
if encoding in _cpython_optimized_encoders:
# CPython optimized implementation
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return _na_map(f, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._data, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._data, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._data, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._data, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
self._is_categorical = is_categorical_dtype(data)
self._data = data.cat.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(self, result, use_codes=True,
name=None, expand=None):
from pandas.core.index import Index, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = False if result.ndim == 1 else True
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, 'name', None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
@copy(str_cat)
def cat(self, others=None, sep=None, na_rep=None):
data = self._orig if self._is_categorical else self._data
result = str_cat(data, others=others, sep=sep, na_rep=na_rep)
return self._wrap_result(result, use_codes=(not self._is_categorical))
@copy(str_split)
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._data, pat, n=n)
return self._wrap_result(result, expand=expand)
@copy(str_rsplit)
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._data, pat, n=n)
return self._wrap_result(result, expand=expand)
_shared_docs['str_partition'] = ("""
Split the string at the %(side)s occurrence of `sep`, and return 3 elements
containing the part before the separator, the separator itself,
and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
pat : string, default whitespace
String to split on.
expand : bool, default True
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
Returns
-------
split : DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Examples
--------
>>> s = Series(['A_B_C', 'D_E_F', 'X'])
0 A_B_C
1 D_E_F
2 X
dtype: object
>>> s.str.partition('_')
0 1 2
0 A _ B_C
1 D _ E_F
2 X
>>> s.str.rpartition('_')
0 1 2
0 A_B _ C
1 D_E _ F
2 X
""")
@Appender(_shared_docs['str_partition'] % {
'side': 'first',
'return': '3 elements containing the string itself, followed by two '
'empty strings',
'also': 'rpartition : Split the string at the last occurrence of `sep`'
})
def partition(self, pat=' ', expand=True):
f = lambda x: x.partition(pat)
result = _na_map(f, self._data)
return self._wrap_result(result, expand=expand)
@Appender(_shared_docs['str_partition'] % {
'side': 'last',
'return': '3 elements containing two empty strings, followed by the '
'string itself',
'also': 'partition : Split the string at the first occurrence of `sep`'
})
def rpartition(self, pat=' ', expand=True):
f = lambda x: x.rpartition(pat)
result = _na_map(f, self._data)
return self._wrap_result(result, expand=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._data, i)
return self._wrap_result(result)
@copy(str_join)
def join(self, sep):
result = str_join(self._data, sep)
return self._wrap_result(result)
@copy(str_contains)
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(self._data, pat, case=case, flags=flags, na=na,
regex=regex)
return self._wrap_result(result)
@copy(str_match)
def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None):
result = str_match(self._data, pat, case=case, flags=flags, na=na,
as_indexer=as_indexer)
return self._wrap_result(result)
@copy(str_replace)
def replace(self, pat, repl, n=-1, case=None, flags=0):
result = str_replace(self._data, pat, repl, n=n, case=case,
flags=flags)
return self._wrap_result(result)
@copy(str_repeat)
def repeat(self, repeats):
result = str_repeat(self._data, repeats)
return self._wrap_result(result)
@copy(str_pad)
def pad(self, width, side='left', fillchar=' '):
result = str_pad(self._data, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs['str_pad'] = ("""
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
""")
@Appender(_shared_docs['str_pad'] % dict(side='left and right',
method='center'))
def center(self, width, fillchar=' '):
return self.pad(width, side='both', fillchar=fillchar)
@Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
def ljust(self, width, fillchar=' '):
return self.pad(width, side='right', fillchar=fillchar)
@Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
def rjust(self, width, fillchar=' '):
return self.pad(width, side='left', fillchar=fillchar)
def zfill(self, width):
"""
Filling left side of strings in the Series/Index with 0.
Equivalent to :meth:`str.zfill`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with 0
Returns
-------
filled : Series/Index of objects
"""
result = str_pad(self._data, width, side='left', fillchar='0')
return self._wrap_result(result)
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._data, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._data, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
result = str_decode(self._data, encoding, errors)
return self._wrap_result(result)
@copy(str_encode)
def encode(self, encoding, errors="strict"):
result = str_encode(self._data, encoding, errors)
return self._wrap_result(result)
_shared_docs['str_strip'] = ("""
Strip whitespace (including newlines) from each string in the
Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`.
Returns
-------
stripped : Series/Index of objects
""")
@Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
method='strip'))
def strip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='both')
return self._wrap_result(result)
@Appender(_shared_docs['str_strip'] % dict(side='left side',
method='lstrip'))
def lstrip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['str_strip'] % dict(side='right side',
method='rstrip'))
def rstrip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='right')
return self._wrap_result(result)
@copy(str_wrap)
def wrap(self, width, **kwargs):
result = str_wrap(self._data, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
def get_dummies(self, sep='|'):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._data
result, name = str_get_dummies(data, sep)
return self._wrap_result(result, use_codes=(not self._is_categorical),
name=name, expand=True)
@copy(str_translate)
def translate(self, table, deletechars=None):
result = str_translate(self._data, table, deletechars)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True)
startswith = _pat_wrapper(str_startswith, na=True)
endswith = _pat_wrapper(str_endswith, na=True)
findall = _pat_wrapper(str_findall, flags=True)
@copy(str_extract)
def extract(self, pat, flags=0, expand=None):
return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags=flags)
_shared_docs['find'] = ("""
Return %(side)s indexes in each string of the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
""")
@Appender(_shared_docs['find'] %
dict(side='lowest', method='find',
also='rfind : Return highest indexes in each strings'))
def find(self, sub, start=0, end=None):
result = str_find(self._data, sub, start=start, end=end, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['find'] %
dict(side='highest', method='rfind',
also='find : Return lowest indexes in each strings'))
def rfind(self, sub, start=0, end=None):
result = str_find(self._data, sub, start=start, end=end, side='right')
return self._wrap_result(result)
def normalize(self, form):
"""Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, compat.u_safe(x))
result = _na_map(f, self._data)
return self._wrap_result(result)
_shared_docs['index'] = ("""
Return %(side)s indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
""")
@Appender(_shared_docs['index'] %
dict(side='lowest', similar='find', method='index',
also='rindex : Return highest indexes in each strings'))
def index(self, sub, start=0, end=None):
result = str_index(self._data, sub, start=start, end=end, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['index'] %
dict(side='highest', similar='rfind', method='rindex',
also='index : Return lowest indexes in each strings'))
def rindex(self, sub, start=0, end=None):
result = str_index(self._data, sub, start=start, end=end, side='right')
return self._wrap_result(result)
_shared_docs['len'] = ("""
Compute length of each string in the Series/Index.
Returns
-------
lengths : Series/Index of integer values
""")
len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
_shared_docs['casemethods'] = ("""
Convert strings in the Series/Index to %(type)s.
Equivalent to :meth:`str.%(method)s`.
Returns
-------
converted : Series/Index of objects
""")
_shared_docs['lower'] = dict(type='lowercase', method='lower')
_shared_docs['upper'] = dict(type='uppercase', method='upper')
_shared_docs['title'] = dict(type='titlecase', method='title')
_shared_docs['capitalize'] = dict(type='be capitalized',
method='capitalize')
_shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
lower = _noarg_wrapper(lambda x: x.lower(),
docstring=_shared_docs['casemethods'] %
_shared_docs['lower'])
upper = _noarg_wrapper(lambda x: x.upper(),
docstring=_shared_docs['casemethods'] %
_shared_docs['upper'])
title = _noarg_wrapper(lambda x: x.title(),
docstring=_shared_docs['casemethods'] %
_shared_docs['title'])
capitalize = _noarg_wrapper(lambda x: x.capitalize(),
docstring=_shared_docs['casemethods'] %
_shared_docs['capitalize'])
swapcase = _noarg_wrapper(lambda x: x.swapcase(),
docstring=_shared_docs['casemethods'] %
_shared_docs['swapcase'])
_shared_docs['ismethods'] = ("""
Check whether all characters in each string in the Series/Index
are %(type)s. Equivalent to :meth:`str.%(method)s`.
Returns
-------
is : Series/array of boolean values
""")
_shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
_shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
_shared_docs['isdigit'] = dict(type='digits', method='isdigit')
_shared_docs['isspace'] = dict(type='whitespace', method='isspace')
_shared_docs['islower'] = dict(type='lowercase', method='islower')
_shared_docs['isupper'] = dict(type='uppercase', method='isupper')
_shared_docs['istitle'] = dict(type='titlecase', method='istitle')
_shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
_shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
isalnum = _noarg_wrapper(lambda x: x.isalnum(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isalnum'])
isalpha = _noarg_wrapper(lambda x: x.isalpha(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isalpha'])
isdigit = _noarg_wrapper(lambda x: x.isdigit(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isdigit'])
isspace = _noarg_wrapper(lambda x: x.isspace(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isspace'])
islower = _noarg_wrapper(lambda x: x.islower(),
docstring=_shared_docs['ismethods'] %
_shared_docs['islower'])
isupper = _noarg_wrapper(lambda x: x.isupper(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isupper'])
istitle = _noarg_wrapper(lambda x: x.istitle(),
docstring=_shared_docs['ismethods'] %
_shared_docs['istitle'])
isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isnumeric'])
isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isdecimal'])
@classmethod
def _make_accessor(cls, data):
from pandas.core.index import Index
if (isinstance(data, ABCSeries) and
not ((is_categorical_dtype(data.dtype) and
is_object_dtype(data.values.categories)) or
(is_object_dtype(data.dtype)))):
            # it's neither a string series nor a categorical series with
            # strings inside the categories.
# this really should exclude all series with any non-string values
# (instead of test for object dtype), but that isn't practical for
# performance reasons until we have a str dtype (GH 9343)
raise AttributeError("Can only use .str accessor with string "
"values, which use np.object_ dtype in "
"pandas")
elif isinstance(data, Index):
# can't use ABCIndex to exclude non-str
            # see src/inference.pyx, which can contain string values
allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
if data.inferred_type not in allowed_types:
message = ("Can only use .str accessor with string values "
"(i.e. inferred_type is 'string', 'unicode' or "
"'mixed')")
raise AttributeError(message)
if data.nlevels > 1:
message = ("Can only use .str accessor with Index, not "
"MultiIndex")
raise AttributeError(message)
return cls(data)
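# Hedged, standalone usage sketch (added for illustration; not part of this
# module). The methods defined above are reached through the .str accessor on
# a Series or Index of strings:
if __name__ == '__main__':
    import pandas as pd
    s = pd.Series(['dog', 'cat', 'caterpillar'])
    print(s.str.find('cat').tolist())   # [-1, 0, 0]
    print(s.str.len().tolist())         # [3, 3, 11]
    print(s.str.upper().tolist())       # ['DOG', 'CAT', 'CATERPILLAR']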
|
bsd-3-clause
|
effigies/mne-python
|
examples/decoding/plot_decoding_csp_eeg.py
|
2
|
5586
|
"""
===========================================================================
Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
===========================================================================
Decoding of motor imagery applied to EEG data decomposed using CSP.
Here the classifier is applied to features extracted from CSP-filtered signals.
See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1].
The EEGBCI dataset is documented in [2].
The data set is available at PhysioNet [3].
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
[2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
(BCI) System. IEEE TBME 51(6):1034-1043
[3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource for
Complex Physiologic Signals. Circulation 101(23):e215-e220
"""
# Authors: Martin Billinger <[email protected]>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, pick_types
from mne.io import concatenate_raws
from mne.io.edf import read_raw_edf
from mne.datasets import eegbci
from mne.event import find_events
from mne.decoding import CSP
from mne.layouts import read_layout
###############################################################################
# Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, tal_channel=-1, preload=True) for f in raw_fnames]
raw = concatenate_raws(raw_files)
# strip the trailing '.' padding from channel names
raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]
# Apply band-pass filter
raw.filter(7., 30., method='iir')
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True, add_eeg_ref=False)
epochs_train = epochs.crop(tmin=1., tmax=2., copy=True)
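# event codes are 2 (hands) and 3 (feet), so subtracting 2 yields labels 0 and 1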
labels = epochs.events[:, -1] - 2
###############################################################################
# Classification with linear discriminant analysis
from sklearn.lda import LDA
from sklearn.cross_validation import ShuffleSplit
# Assemble a classifier
svc = LDA()
csp = CSP(n_components=4, reg=None, log=True)
# Define a Monte Carlo cross-validation generator (to reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
# Use scikit-learn Pipeline with cross_val_score function
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import cross_val_score
clf = Pipeline([('CSP', csp), ('SVC', svc)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
evoked = epochs.average()
evoked.data = csp.patterns_.T
evoked.times = np.arange(evoked.data.shape[0])
layout = read_layout('EEG1005')
evoked.plot_topomap(times=[0, 1, 2, 61, 62, 63], ch_type='eeg', layout=layout,
scale_time=1, time_format='%i', scale=1,
unit='Patterns (AU)', size=1.5)
###############################################################################
# Look at performance over time
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
svc.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(svc.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
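###############################################################################
# Hedged, standalone sketch (added for illustration only): the sliding-window
# bookkeeping used above, spelled out with assumed numbers. The 160 Hz rate,
# 801 samples per epoch and tmin of -1 s are assumptions of this demo, not
# values read from the data.
demo_sfreq, demo_n_times, demo_tmin = 160., 801, -1.
demo_w_length = int(demo_sfreq * 0.5)   # 80 samples -> 0.5 s window
demo_w_step = int(demo_sfreq * 0.1)     # 16 samples -> 0.1 s between window starts
demo_w_start = np.arange(0, demo_n_times - demo_w_length, demo_w_step)
demo_w_times = (demo_w_start + demo_w_length / 2.) / demo_sfreq + demo_tmin
print('demo windows are centred from %.2f s to %.2f s'
      % (demo_w_times[0], demo_w_times[-1]))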
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/tests/scalar/test_period.py
|
6
|
50302
|
import pytest
import numpy as np
from datetime import datetime, date, timedelta
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas.compat import text_type, iteritems
from pandas.compat.numpy import np_datetime64_compat
from pandas._libs import tslib, period as libperiod
from pandas import Period, Timestamp, offsets
from pandas.tseries.frequencies import DAYS, MONTHS
class TestPeriodProperties(object):
"Test properties such as year, month, weekday, etc...."
def test_is_leap_year(self):
# GH 13727
for freq in ['A', 'M', 'D', 'H']:
p = Period('2000-01-01 00:00:00', freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period('1999-01-01 00:00:00', freq=freq)
assert not p.is_leap_year
p = Period('2004-01-01 00:00:00', freq=freq)
assert p.is_leap_year
p = Period('2100-01-01 00:00:00', freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq='Q-DEC')
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq='M')
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
assert '1989Q3' in str(exp)
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp('3D', how='end')
p = Period(stamp, freq=freq)
assert p == exp
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = pd.Period('2011-01', freq='M')
res = pd.Period._from_ordinal(p.ordinal, freq='M')
assert p == res
assert isinstance(res, Period)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
assert p is pd.NaT
p = Period('nat', freq='W-SUN')
assert p is pd.NaT
p = Period(tslib.iNaT, freq='D')
assert p is pd.NaT
p = Period(tslib.iNaT, freq='3D')
assert p is pd.NaT
p = Period(tslib.iNaT, freq='1D1H')
assert p is pd.NaT
p = Period('NaT')
assert p is pd.NaT
p = Period(tslib.iNaT)
assert p is pd.NaT
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == '3M'
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == 'M'
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == '3M'
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == '3M'
msg = ('Frequency must be positive, because it'
' represents span: -3M')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='-3M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='0M')
def test_period_cons_combined(self):
p = [(Period('2011-01', freq='1D1H'),
Period('2011-01', freq='1H1D'),
Period('2011-01', freq='H')),
(Period(ordinal=1, freq='1D1H'),
Period(ordinal=1, freq='1H1D'),
Period(ordinal=1, freq='H'))]
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == '25H'
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == '25H'
assert p3.freq == offsets.Hour()
assert p3.freqstr == 'H'
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == '25H'
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == '25H'
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == '25H'
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == '25H'
msg = ('Frequency must be positive, because it'
' represents span: -25H')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='-1D1H')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='-1H1D')
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq='-1D1H')
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq='-1H1D')
msg = ('Frequency must be positive, because it'
' represents span: 0D')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='0D0H')
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq='0D0H')
        # You can only combine day and intraday offsets
msg = ('Invalid frequency: 1W1D')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='1W1D')
msg = ('Invalid frequency: 1D1W')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='1D1W')
def test_timestamp_tz_arg(self):
tm._skip_if_no_pytz()
import pytz
for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
p = Period('1/1/2005', freq='3H').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case)
exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
def test_timestamp_tz_arg_dateutil(self):
from pandas._libs.tslib import _dateutil_gettz as gettz
from pandas._libs.tslib import maybe_get_tz
for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo',
'dateutil/US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(
tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
assert p == exp
assert p.tz == gettz(case.split('/', 1)[1])
assert p.tz == exp.tz
p = Period('1/1/2005',
freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
assert p == exp
assert p.tz == gettz(case.split('/', 1)[1])
assert p.tz == exp.tz
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas._libs.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005',
freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
assert p.tz == gettz('Europe/Brussels')
def test_timestamp_mult(self):
p = pd.Period('2011-01', freq='M')
assert p.to_timestamp(how='S') == pd.Timestamp('2011-01-01')
assert p.to_timestamp(how='E') == pd.Timestamp('2011-01-31')
p = pd.Period('2011-01', freq='3M')
assert p.to_timestamp(how='S') == pd.Timestamp('2011-01-01')
assert p.to_timestamp(how='E') == pd.Timestamp('2011-03-31')
def test_construction(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
assert i1 == i2
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
assert i1 == i2
assert i1 == i3
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
pytest.raises(ValueError, i1.__ne__, i4)
assert i4 == i5
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
assert i1 == i2
assert i1 == i3
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
assert i1 == i2
i2 = Period('1982', freq=('Min', 1))
assert i1 == i2
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
assert i1 == i2
i3 = Period(year=2005, month=3, day=1, freq='d')
assert i1 == i3
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
assert i1 == expected
expected = Period(np_datetime64_compat(
'2007-01-01 09:00:00.001Z'), freq='L')
assert i1 == expected
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
assert i1 == expected
expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
freq='U')
assert i1 == expected
pytest.raises(ValueError, Period, ordinal=200701)
pytest.raises(ValueError, Period, '2007-1-1', freq='X')
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
assert i1 == i2.asfreq('B')
i2 = Period('3/11/12', freq='D')
assert i1 == i2.asfreq('B')
i2 = Period('3/12/12', freq='D')
assert i1 == i2.asfreq('B')
i3 = Period('3/10/12', freq='b')
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
assert i1 == i2
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
assert i1 == i2
assert i1 == i3
i1 = Period('05Q1')
assert i1 == i2
lower = Period('05q1')
assert i1 == lower
i1 = Period('1Q2005')
assert i1 == i2
lower = Period('1q2005')
assert i1 == lower
i1 = Period('1Q05')
assert i1 == i2
lower = Period('1q05')
assert i1 == lower
i1 = Period('4Q1984')
assert i1.year == 1984
lower = Period('4q1984')
assert i1 == lower
def test_construction_month(self):
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
assert i1 == expected
i1 = Period('200701', freq='M')
assert i1 == expected
i1 = Period(200701, freq='M')
assert i1 == expected
i1 = Period(ordinal=200701, freq='M')
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert (Period('1/1/2005', freq=offsets.MonthEnd()) ==
Period('1/1/2005', freq='M'))
assert (Period('2005', freq=offsets.YearEnd()) ==
Period('2005', freq='A'))
assert (Period('2005', freq=offsets.MonthEnd()) ==
Period('2005', freq='M'))
assert (Period('3/10/12', freq=offsets.BusinessDay()) ==
Period('3/10/12', freq='B'))
assert (Period('3/10/12', freq=offsets.Day()) ==
Period('3/10/12', freq='D'))
assert (Period(year=2005, quarter=1,
freq=offsets.QuarterEnd(startingMonth=12)) ==
Period(year=2005, quarter=1, freq='Q'))
assert (Period(year=2005, quarter=2,
freq=offsets.QuarterEnd(startingMonth=12)) ==
Period(year=2005, quarter=2, freq='Q'))
assert (Period(year=2005, month=3, day=1, freq=offsets.Day()) ==
Period(year=2005, month=3, day=1, freq='D'))
assert (Period(year=2012, month=3, day=10, freq=offsets.BDay()) ==
Period(year=2012, month=3, day=10, freq='B'))
expected = Period('2005-03-01', freq='3D')
assert (Period(year=2005, month=3, day=1,
freq=offsets.Day(3)) == expected)
assert Period(year=2005, month=3, day=1, freq='3D') == expected
assert (Period(year=2012, month=3, day=10,
freq=offsets.BDay(3)) ==
Period(year=2012, month=3, day=10, freq='3B'))
assert (Period(200701, freq=offsets.MonthEnd()) ==
Period(200701, freq='M'))
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq='M')
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
assert i1 == expected
expected = Period(np_datetime64_compat(
'2007-01-01 09:00:00.001Z'), freq='L')
assert i1 == expected
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
assert i1 == expected
expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
freq='U')
assert i1 == expected
pytest.raises(ValueError, Period, ordinal=200701)
pytest.raises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
assert i1.freq == offsets.Minute()
assert i1.freqstr == 'T'
def test_period_deprecated_freq(self):
cases = {"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]}
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
for exp, freqs in iteritems(cases):
for freq in freqs:
with tm.assert_raises_regex(ValueError, msg):
Period('2016-03-01 09:00', freq=freq)
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq=freq)
            # check that the supported freq aliases still work
p1 = Period('2016-03-01 09:00', freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
def test_hash(self):
assert (hash(Period('2011-01', freq='M')) ==
hash(Period('2011-01', freq='M')))
assert (hash(Period('2011-01-01', freq='D')) !=
hash(Period('2011-01', freq='M')))
assert (hash(Period('2011-01', freq='3M')) !=
hash(Period('2011-01', freq='2M')))
assert (hash(Period('2011-01', freq='M')) !=
hash(Period('2011-02', freq='M')))
def test_repr(self):
p = Period('Jan-2000')
assert '2000-01' in repr(p)
p = Period('2000-12-15')
assert '2000-12-15' in repr(p)
def test_repr_nat(self):
p = Period('nat', freq='M')
assert repr(tslib.NaT) in repr(p)
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
assert res == '2000-01-01 12:34:12'
assert isinstance(res, text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
assert result == 4
with pytest.raises(period.IncompatibleFrequency):
left - Period('2007-01', freq='M')
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
assert start_ts == p.to_timestamp('D', how=a)
            # freq with a multiple should not affect the result
assert start_ts == p.to_timestamp('3D', how=a)
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
assert end_ts == p.to_timestamp('D', how=a)
assert end_ts == p.to_timestamp('3D', how=a)
from_lst = ['A', 'Q', 'M', 'W', 'B', 'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
assert result == p
assert p.start_time == p.to_timestamp(how='S')
assert p.end_time == _ex(p)
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
assert result == expected
result = p.to_timestamp('3H', how='end')
assert result == expected
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
assert result == expected
result = p.to_timestamp('2T', how='end')
assert result == expected
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
assert result == expected
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
assert result == expected
result = p.to_timestamp('T', how='start')
assert result == expected
result = p.to_timestamp('S', how='start')
assert result == expected
result = p.to_timestamp('3H', how='start')
assert result == expected
result = p.to_timestamp('5S', how='start')
assert result == expected
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
assert p.start_time == xp
assert Period('2012', freq='B').start_time == datetime(2012, 1, 2)
assert Period('2012', freq='W').start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period('2012', freq='D')
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period('2012', freq='H')
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period('2012', freq='B')
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period('2012', freq='W')
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period('2012', freq='15D')
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period('2012', freq='1D1H')
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period('2012', freq='1H1D')
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
        # Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq='W', year=2012,
month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq='W', year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq='B', year=2012,
month=2, day=1).days_in_month == 29
d_date = Period(freq='D', year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq='D', year=2012, month=2,
day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month == 29
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert Period(freq='D', year=2012, month=2, day=1, hour=0,
minute=0).days_in_month == 29
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert Period(freq='Min', year=2012, month=2, day=1, hour=0,
minute=0, second=0).days_in_month == 29
def test_pnow(self):
# deprecation, xref #13790
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
period.pnow('D')
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
assert Period(year=2007, month=1, freq='2M') == expected
pytest.raises(ValueError, Period, datetime.now())
pytest.raises(ValueError, Period, datetime.now().date())
pytest.raises(ValueError, Period, 1.6, freq='D')
pytest.raises(ValueError, Period, ordinal=1.6, freq='D')
pytest.raises(ValueError, Period, ordinal=2, value=1, freq='D')
assert Period(None) is pd.NaT
pytest.raises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
assert result == exp
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
assert p.freq == 'D'
p = Period('2007-01-01 07')
assert p.freq == 'H'
p = Period('2007-01-01 07:10')
assert p.freq == 'T'
p = Period('2007-01-01 07:10:15')
assert p.freq == 'S'
p = Period('2007-01-01 07:10:15.123')
assert p.freq == 'L'
p = Period('2007-01-01 07:10:15.123000')
assert p.freq == 'L'
p = Period('2007-01-01 07:10:15.123400')
assert p.freq == 'U'
def test_badinput(self):
pytest.raises(ValueError, Period, '-2000', 'A')
pytest.raises(tslib.DateParseError, Period, '0', 'A')
pytest.raises(tslib.DateParseError, Period, '1/1/-2000', 'A')
def test_multiples(self):
result1 = Period('1989', freq='2A')
result2 = Period('1989', freq='A')
assert result1.ordinal == result2.ordinal
assert result1.freqstr == '2A-DEC'
assert result2.freqstr == 'A-DEC'
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
def test_round_trip(self):
p = Period('2000Q1')
new_p = tm.round_trip_pickle(p)
assert new_p == p
class TestPeriodField(object):
def test_get_period_field_raises_on_out_of_range(self):
pytest.raises(ValueError, libperiod.get_period_field, -1, 0, 0)
def test_get_period_field_array_raises_on_out_of_range(self):
pytest.raises(ValueError, libperiod.get_period_field_arr, -1,
np.empty(1), 0)
class TestComparisons(object):
def setup_method(self, method):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
assert self.january1 == self.january2
def test_equal_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 == self.day
def test_notEqual(self):
assert self.january1 != 1
assert self.january1 != self.february
def test_greater(self):
assert self.february > self.january1
def test_greater_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 > self.day
def test_greater_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
assert self.january1 >= self.january2
def test_greaterEqual_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 >= self.day
with pytest.raises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
assert self.january1 <= self.january2
def test_smallerEqual_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 <= 1
def test_smaller(self):
assert self.january1 < self.february
def test_smaller_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
assert sorted(periods) == correctPeriods
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
        # confirm Period('NaT') works identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), (nat, t),
(t, nat), (nat, nat)]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestMethods(object):
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert dt1 + 1 == dt2
assert 1 + dt1 == dt2
def test_add_pdnat(self):
p = pd.Period('2011-01', freq='M')
assert p + pd.NaT is pd.NaT
assert pd.NaT + p is pd.NaT
p = pd.Period('NaT', freq='M')
assert p + pd.NaT is pd.NaT
assert pd.NaT + p is pd.NaT
def test_add_raises(self):
# GH 4731
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
msg = r"unsupported operand type\(s\)"
with tm.assert_raises_regex(TypeError, msg):
dt1 + "str"
msg = r"unsupported operand type\(s\)"
with tm.assert_raises_regex(TypeError, msg):
"str" + dt1
with tm.assert_raises_regex(TypeError, msg):
dt1 + dt2
def test_sub(self):
dt1 = Period('2011-01-01', freq='D')
dt2 = Period('2011-01-15', freq='D')
assert dt1 - dt2 == -14
assert dt2 - dt1 == 14
msg = r"Input has different freq=M from Period\(freq=D\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
dt1 - pd.Period('2011-02', freq='M')
def test_add_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
exp = Period('2013', freq=freq)
assert p + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
exp = Period('2011-05', freq=freq)
assert p + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + p == exp
exp = Period('2012-03', freq=freq)
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
exp = Period('2011-04-06', freq=freq)
assert p + offsets.Day(5) == exp
assert offsets.Day(5) + p == exp
exp = Period('2011-04-02', freq=freq)
assert p + offsets.Hour(24) == exp
assert offsets.Hour(24) + p == exp
exp = Period('2011-04-03', freq=freq)
assert p + np.timedelta64(2, 'D') == exp
with pytest.raises(TypeError):
np.timedelta64(2, 'D') + p
exp = Period('2011-04-02', freq=freq)
assert p + np.timedelta64(3600 * 24, 's') == exp
with pytest.raises(TypeError):
np.timedelta64(3600 * 24, 's') + p
exp = Period('2011-03-30', freq=freq)
assert p + timedelta(-2) == exp
assert timedelta(-2) + p == exp
exp = Period('2011-04-03', freq=freq)
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
exp = Period('2011-04-03 09:00', freq=freq)
assert p + offsets.Day(2) == exp
assert offsets.Day(2) + p == exp
exp = Period('2011-04-01 12:00', freq=freq)
assert p + offsets.Hour(3) == exp
assert offsets.Hour(3) + p == exp
exp = Period('2011-04-01 12:00', freq=freq)
assert p + np.timedelta64(3, 'h') == exp
with pytest.raises(TypeError):
np.timedelta64(3, 'h') + p
exp = Period('2011-04-01 10:00', freq=freq)
assert p + np.timedelta64(3600, 's') == exp
with pytest.raises(TypeError):
np.timedelta64(3600, 's') + p
exp = Period('2011-04-01 11:00', freq=freq)
assert p + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + p == exp
exp = Period('2011-04-05 12:00', freq=freq)
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
def test_add_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
assert p + o is tslib.NaT
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2),
timedelta(hours=48)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
assert p + o is tslib.NaT
if not isinstance(o, np.timedelta64):
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
def test_sub_pdnat(self):
# GH 13071
p = pd.Period('2011-01', freq='M')
assert p - pd.NaT is pd.NaT
assert pd.NaT - p is pd.NaT
p = pd.Period('NaT', freq='M')
assert p - pd.NaT is pd.NaT
assert pd.NaT - p is pd.NaT
def test_sub_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
assert p - offsets.YearEnd(2) == Period('2009', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
assert p - offsets.MonthEnd(2) == Period('2011-01', freq=freq)
assert p - offsets.MonthEnd(12) == Period('2010-03', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
assert p - offsets.Day(5) == Period('2011-03-27', freq=freq)
assert p - offsets.Hour(24) == Period('2011-03-31', freq=freq)
assert p - np.timedelta64(2, 'D') == Period(
'2011-03-30', freq=freq)
assert p - np.timedelta64(3600 * 24, 's') == Period(
'2011-03-31', freq=freq)
assert p - timedelta(-2) == Period('2011-04-03', freq=freq)
assert p - timedelta(hours=48) == Period('2011-03-30', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
assert p - offsets.Day(2) == Period('2011-03-30 09:00', freq=freq)
assert p - offsets.Hour(3) == Period('2011-04-01 06:00', freq=freq)
assert p - np.timedelta64(3, 'h') == Period(
'2011-04-01 06:00', freq=freq)
assert p - np.timedelta64(3600, 's') == Period(
'2011-04-01 08:00', freq=freq)
assert p - timedelta(minutes=120) == Period(
'2011-04-01 07:00', freq=freq)
assert p - timedelta(days=4, minutes=180) == Period(
'2011-03-28 06:00', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p - o is tslib.NaT
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p - o is tslib.NaT
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2),
timedelta(hours=48)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
assert p - o is tslib.NaT
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
assert p - o is tslib.NaT
def test_nat_ops(self):
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
assert p + 1 is tslib.NaT
assert 1 + p is tslib.NaT
assert p - 1 is tslib.NaT
assert p - Period('2011-01', freq=freq) is tslib.NaT
assert Period('2011-01', freq=freq) - p is tslib.NaT
def test_period_ops_offset(self):
p = Period('2011-04-01', freq='D')
result = p + offsets.Day()
exp = pd.Period('2011-04-02', freq='D')
assert result == exp
result = p - offsets.Day(2)
exp = pd.Period('2011-03-30', freq='D')
assert result == exp
msg = r"Input cannot be converted to Period\(freq=D\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
p + offsets.Hour(2)
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
p - offsets.Hour(2)
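# Hedged, standalone sketch (added for illustration; not one of the test cases):
# the basic Period behaviours the tests above exercise.
if __name__ == '__main__':
    p = Period('2011-01', freq='M')
    assert p + 1 == Period('2011-02', freq='M')           # integers step by freq
    assert p.asfreq('D', how='start') == Period('2011-01-01', freq='D')
    assert p.to_timestamp(how='E') >= p.to_timestamp(how='S')
    assert Period('NaT', freq='M') is pd.NaT              # NaT-aware construction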
|
mit
|
gingerwizard/examples
|
Exploring Public Datasets/donorschoose/scripts/donorschoose_process_data.py
|
3
|
6842
|
# coding: utf-8
### Import Packages
import pandas as pd
import numpy as np
import elasticsearch
import re
import json
from datetime import datetime
from elasticsearch import helpers
import timeit
# Create the Elasticsearch client
es = elasticsearch.Elasticsearch()
### Helper Functions
# convert np.int64 into int. json.dumps does not work with int64
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.int64):
return np.int(obj)
# else
return json.JSONEncoder.default(self, obj)
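# Hedged illustration (added line, not part of the original pipeline): with
# SetEncoder, json.dumps can serialise the numpy int64 values mentioned above.
assert json.dumps({'count': np.int64(7)}, cls=SetEncoder) == '{"count": 7}'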
# Convert datestamp into ISO format
def str_to_iso(text):
if text != '':
for fmt in ('%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d'):
try:
return datetime.isoformat(datetime.strptime(text, fmt))
except ValueError:
pass
raise ValueError('no valid date format found')
else:
return None
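# Hedged illustration (added lines, not part of the original pipeline):
# str_to_iso normalises the timestamp layouts found in the CSVs to ISO 8601.
assert str_to_iso('2013-05-01 12:30:00') == '2013-05-01T12:30:00'
assert str_to_iso('') is None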
# Custom groupby function
def concatdf(x):
if len(x) > 1: #if multiple values
return list(x)
else: #if single value
return x.iloc[0]
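# Hedged illustration (added lines): concatdf keeps single-row groups as
# scalars and turns multi-row groups into lists, which is how the resource
# rows are merged per project below.
assert concatdf(pd.Series(['a'])) == 'a'
assert concatdf(pd.Series(['a', 'b'])) == ['a', 'b']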
### Import Data
# Load projects, resources & donations data
print("Loading datasets")
projects = pd.read_csv('./data/opendata_projects000.gz', escapechar='\\', names=['projectid', 'teacher_acctid', 'schoolid', 'school_ncesid', 'school_latitude', 'school_longitude', 'school_city', 'school_state', 'school_zip', 'school_metro', 'school_district', 'school_county', 'school_charter', 'school_magnet', 'school_year_round', 'school_nlns', 'school_kipp', 'school_charter_ready_promise', 'teacher_prefix', 'teacher_teach_for_america', 'teacher_ny_teaching_fellow', 'primary_focus_subject', 'primary_focus_area' ,'secondary_focus_subject', 'secondary_focus_area', 'resource_type', 'poverty_level', 'grade_level', 'vendor_shipping_charges', 'sales_tax', 'payment_processing_charges', 'fulfillment_labor_materials', 'total_price_excluding_optional_support', 'total_price_including_optional_support', 'students_reached', 'total_donations', 'num_donors', 'eligible_double_your_impact_match', 'eligible_almost_home_match', 'funding_status', 'date_posted', 'date_completed', 'date_thank_you_packet_mailed', 'date_expiration'])
donations = pd.read_csv('./data/opendata_donations000.gz', escapechar='\\', names=['donationid', 'projectid', 'donor_acctid', 'cartid', 'donor_city', 'donor_state', 'donor_zip', 'is_teacher_acct', 'donation_timestamp', 'donation_to_project', 'donation_optional_support', 'donation_total', 'donation_included_optional_support', 'payment_method', 'payment_included_acct_credit', 'payment_included_campaign_gift_card', 'payment_included_web_purchased_gift_card', 'payment_was_promo_matched', 'is_teacher_referred', 'giving_page_id', 'giving_page_type', 'for_honoree', 'thank_you_packet_mailed'])
resources = pd.read_csv('./data/opendata_resources000.gz', escapechar='\\', names=['resourceid', 'projectid', 'vendorid', 'vendor_name', 'item_name', 'item_number', 'item_unit_price', 'item_quantity'])
### Data Cleanup
# replace nan with ''
print("Cleaning Data")
projects = projects.fillna('')
donations = donations.fillna('')
resources = resources.fillna('')
# Clean up column names: remove leading spaces and '_' at the start of column names
donations.columns = donations.columns.map(lambda x: re.sub('^ ', '', x))
donations.columns = donations.columns.map(lambda x: re.sub('^_', '', x))
projects.columns = projects.columns.map(lambda x: re.sub('^_', '', x))
resources.columns = resources.columns.map(lambda x: re.sub('^ ', '', x))
resources.columns = resources.columns.map(lambda x: re.sub('^_', '', x))
# Add quotes around projectid values to match format in projects / donations column
resources['projectid'] = resources['projectid'].map(lambda x: '"' + x +'"')
# Add resource_prefix to column names
resources.rename(columns={'vendorid': 'resource_vendorid', 'vendor_name': 'resource_vendor_name', 'item_name': 'resource_item_name',
'item_number' :'resource_item_number', "item_unit_price": 'resource_item_unit_price',
'item_quantity': 'resource_item_quantity'}, inplace=True)
### Merge multiple resource row per projectid into a single row
# NOTE: section may take a few minutes to execute
print("Grouping Data by ProjectId")
concat_resource = pd.DataFrame()
gb = resources.groupby('projectid')
start = timeit.default_timer()  # wall-clock timer; timeit.timeit() would time an empty statement instead
for a in resources.columns.values:
print(a)
concat_resource[a] = gb[a].apply(lambda x: concatdf(x))
#print(xx.index)
end = timeit.default_timer()
print(end - start)  # seconds spent grouping resources by projectid
concat_resource['projectid'] = concat_resource.index;
concat_resource = concat_resource.reset_index(drop=True)  # reassign: reset_index returns a new frame
### Rename Project columns
projects.rename(columns=lambda x: "project_" + x, inplace=True)
projects.rename(columns={"project_projectid": "projectid"}, inplace=True)
projects.columns.values
#### Merge data into single frame
print("Merging datasets")
data = pd.merge(projects, concat_resource, how='left', right_on='projectid', left_on='projectid')
data = pd.merge(donations, data, how='left', right_on='projectid', left_on='projectid')
data = data.fillna('')
#### Process columns
# Modify date formats
data['project_date_expiration'] = data['project_date_expiration'].map(lambda x: str_to_iso(x));
data['project_date_posted'] = data['project_date_posted'].map(lambda x: str_to_iso(x))
data['project_date_thank_you_packet_mailed'] = data['project_date_thank_you_packet_mailed'].map(lambda x: str_to_iso(x))
data['project_date_completed'] = data['project_date_completed'].map(lambda x: str_to_iso(x))
data['donation_timestamp'] = data['donation_timestamp'].map(lambda x: str_to_iso(x))
# Create location field that combines lat/lon information
data['project_location'] = data[['project_school_longitude','project_school_latitude']].values.tolist()
del(data['project_school_latitude']) # delete latitude field
del(data['project_school_longitude']) # delete longitude
### Create and configure Elasticsearch index
print("Preparing to Index to ES")
# Name of index and document type
index_name = 'donorschoose'
doc_name = 'donation'
# Delete donorschoose index if one does exist
if es.indices.exists(index_name):
es.indices.delete(index_name)
# Create donorschoose index
es.indices.create(index_name)
# Add mapping
with open('donorschoose_mapping.json') as json_mapping:
d = json.load(json_mapping)
es.indices.put_mapping(index=index_name, doc_type=doc_name, body=d)
def read_data(data):
for don_id, thisDonation in data.iterrows():
        # print progress every 10000 rows
if don_id % 10000 == 0:
print(don_id)
doc={}
doc["_index"]=index_name
doc["_id"]=thisDonation['donationid']
doc["_type"]=doc_name
doc["_source"]=thisDonation.to_dict()
yield doc
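# Hedged note (added comment): each yielded dict is one bulk "action" for
# elasticsearch.helpers.bulk; _index/_type route the document and using the
# donationid as _id makes re-runs overwrite documents instead of duplicating them.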
### Index Data into Elasticsearch
print("Indexing")
helpers.bulk(es,read_data(data),index=index_name,doc_type=doc_name)
|
apache-2.0
|
camallen/aggregation
|
experimental/penguins/dbscan.py
|
2
|
5533
|
#!/usr/bin/env python
import pymongo
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import numpy as np
import os
import urllib
import matplotlib.cbook as cbook
from sklearn.datasets.samples_generator import make_blobs
from matplotlib.patches import Ellipse
from copy import deepcopy
__author__ = 'greghines'
client = pymongo.MongoClient()
db = client['penguins']
collection = db["penguin_classifications"]
penguins = {}
adults = {}
chicks = {}
eggs = {}
count = {}
fNames = {}
pCount = {}
i = 0
pen = 0
total = 0
for r in collection.find():
for a in r["annotations"]:
if ('value' in a) and not(a["value"] in ["penguin", "adult", "no", "yes", "finished", "unfinished", "cant_tell", "", "chick", "eggs", "other"]):
zooniverseID = r["subjects"][0]["zooniverse_id"]
if not(zooniverseID in adults):
penguins[zooniverseID] = []
adults[zooniverseID] = []
chicks[zooniverseID] = []
eggs[zooniverseID] = []
count[zooniverseID] = 1
url = r["subjects"][0]["location"]["standard"]
fNames[zooniverseID] = url.split("/")[-1]
else:
count[zooniverseID] += 1
penguins[zooniverseID].append(len(a["value"]))
for index in a["value"]:
point = a["value"][index]
if point["value"] == "adult":
adults[zooniverseID].append((float(point["x"]),float(point["y"])))
elif point["value"] == "chick":
chicks[zooniverseID].append((float(point["x"]),float(point["y"])))
elif point["value"] == "eggs":
eggs[zooniverseID].append((float(point["x"]),float(point["y"])))
else:
pass
#print point["value"]
#penguins[zooniverseID].append((float(point["x"]),float(point["y"])))
overallCount = {2:0,3:0,4:0,5:0}
for zooniverseID in penguins:
pCount[zooniverseID] = np.mean(penguins[zooniverseID])
#print sorted(pCount.items(),key = lambda x:x[1])
#assert(False)
print count["APZ00003i8"]
for zooniverseID,c in count.items():
if c >= 3:
#overallCount[c] += 1
#if zooniverseID in ["APZ00004er","APZ00004er","APZ00003h1"]: #!= "APZ00003lc":
# continue
print str(zooniverseID) + "," + str(c)
#print "/home/greg/Databases/penguins/images"+fNames[zooniverseID]
#print zooniverseID,pCount[zooniverseID]
#if zooniverseID in ["APZ00003lc","APZ00002ea","APZ00003l4"]:
# continue
#print (zooniverseID,c)
#do we already have this file?
if not(os.path.isfile("/home/greg/Databases/penguins/images/"+fNames[zooniverseID])):
urllib.urlretrieve ("http://demo.zooniverse.org/penguins/subjects/standard/"+fNames[zooniverseID], "/home/greg/Databases/penguins/images/"+fNames[zooniverseID])
image_file = cbook.get_sample_data("/home/greg/Databases/penguins/images/"+fNames[zooniverseID])
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
fOut = open("/home/greg/Databases/penguins/dbscan/"+fNames[zooniverseID][:-4]+".csv","wb")
fOut.write("penguinType,xCoord,yCoord\n")
for colour,penType,data in [("green","adult",adults),("blue","chick",chicks),("red","egg",eggs)]:
X = np.array(data[zooniverseID])
db = DBSCAN(eps=20, min_samples=2).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
#print('Estimated number of clusters: %d' % n_clusters_)
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k in unique_labels:
class_member_mask = (labels == k)
xy = X[class_member_mask]
if k == -1:
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor='k',markeredgecolor='k', markersize=6)
else:
xSet,ySet = zip(*list(X[class_member_mask]))
x = np.mean(xSet)
y = np.mean(ySet)
plt.plot(x, y, 'o', markerfacecolor=colour,markeredgecolor='k', markersize=6)
fOut.write(penType+","+str(x)+","+str(y)+"\n")
fOut.close()
# for k, col in zip(unique_labels, colors):
# if k == -1:
# # Black used for noise.
# col = 'k'
#
# class_member_mask = (labels == k)
#
# xy = X[class_member_mask & core_samples_mask]
# plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
# markeredgecolor='k', markersize=14)
#
# xy = X[class_member_mask & ~core_samples_mask]
# plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
# markeredgecolor='k', markersize=6)
#
plt.title('Number of users: %d' % (c))
plt.savefig("/home/greg/Databases/penguins/dbscan/"+fNames[zooniverseID])
plt.close()
#plt.show()
#break
print overallCount
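# Added illustration (hedged, not part of the original analysis): DBSCAN marks
# noise points with the label -1, which is why the cluster count above subtracts
# one whenever -1 appears in the label set. On a toy input such as
#   DBSCAN(eps=20, min_samples=2).fit(np.array([[0, 0], [5, 5], [500, 500]])).labels_
# the two nearby points share cluster label 0 while the isolated third point
# comes back labelled -1.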
|
apache-2.0
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_extension.py
|
9
|
2254
|
from __future__ import print_function
import array
import pandas.msgpack as msgpack
from pandas.msgpack import ExtType
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b'A') == b'\xd4\x42A' # fixext 1
assert p(b'AB') == b'\xd5\x42AB' # fixext 2
assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
assert p(b'A'*16) == b'\xd8\x42' + b'A'*16 # fixext 16
assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
assert p(b'A'*0x0123) == b'\xc8\x01\x23\x42' + b'A'*0x0123 # ext 16
assert p(b'A'*0x00012345) == b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345 # ext 32
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
check(b'\xd8\x42' + b'A'*16, ExtType(0x42, b'A'*16)) # fixext 16
check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
check(b'\xc8\x01\x23\x42' + b'A'*0x0123,
ExtType(0x42, b'A'*0x0123)) # ext 16
check(b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345,
ExtType(0x42, b'A'*0x00012345)) # ext 32
def test_extension_type():
def default(obj):
print('default called', obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = obj.tostring()
return ExtType(typecode, data)
raise TypeError("Unknwon type object %r" % (obj,))
def ext_hook(code, data):
print('ext_hook called', code, data)
assert code == 123
obj = array.array('d')
obj.fromstring(data)
return obj
obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
|
artistic-2.0
|
AndersenLab/cegwas-web
|
mapping_worker/utils/vcf_np.py
|
2
|
20392
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Daniel E. Cook
Script/Tools for working with a VCF in python.
Used for generating the interval summary.
"""
import json
import re
import pandas as pd
import numpy as np
import itertools
from collections import defaultdict, Counter
from cyvcf2 import VCF
from pandas import DataFrame, Series
from logzero import logger
from functools import reduce
def infinite_dict():
return defaultdict(infinite_dict)
def flatten_cols(df):
"""
Flattens hierarchical columns
Stack Overflow: 14507794
"""
df.columns = [
'_'.join(tuple(map(str, t))).rstrip('_')
for t in df.columns.values
]
return df
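# Example (added sketch): for a grouped/aggregated frame whose columns form the
# MultiIndex [('gene_id', 'count')], flatten_cols renames the column to
# 'gene_id_count', e.g.
#   df = pd.DataFrame({'biotype': ['a', 'a'], 'gene_id': [1, 2]}).groupby('biotype').agg(['count'])
#   flatten_cols(df).columns  ->  Index(['gene_id_count'])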
ANN_FIELDS = ["allele",
"effect",
"impact",
"gene_name",
"gene_id",
"feature_type",
"feature_id",
"transcript_biotype",
"exon_intron_rank",
"nt_change",
"aa_change",
"cdna_pos",
"protein_position",
"distance_to_feature",
"error"]
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
class AnnotationItem(Series):
@property
def _constructor(self):
return AnnotationItem
@property
def _constructor_expanddim(self):
return VCF_DataFrame
def __eq__(self, other):
return AnnotationItem(self.apply(lambda row: other in row if type(row) == list else False))
@property
def length(self):
result = self.apply(lambda row: len(row) if type(row) == list else 0)
return AnnotationItem(data=result)
class AnnotationSeries(Series):
# https://stackoverflow.com/q/48435082/2615190
our_column_names = ('ANN',)
def __new__(cls, *args, **kwargs):
if kwargs.get('name', '') in cls.our_column_names:
obj = object.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
return pd.Series(*args, **kwargs)
def __eq__(self, other):
return self.apply(lambda row: other in row if type(row) == list else False)
@property
def _constructor(self):
return AnnotationSeries
@property
def _constructor_expanddim(self):
return VCF_DataFrame
def _fetch_field(self, field):
"""
Highly redundant - but I could not
figure out a way to dynamically specify properties.
"""
ann_column_index = ANN_FIELDS.index(field)
result = self.apply(lambda row: [x[ann_column_index] for x in row] if type(row) == list else np.nan)
return AnnotationSeries(data=result, name='ANN')
@property
def allele(self):
result = self._fetch_field('allele')
return AnnotationItem(data=result, name='ANN')
@property
def effect(self):
result = self._fetch_field('effect')
return AnnotationItem(data=result, name='ANN')
@property
def impact(self):
result = self._fetch_field('impact')
return AnnotationItem(data=result, name='ANN')
@property
def gene_name(self):
result = self._fetch_field('gene_name')
return AnnotationItem(data=result, name='ANN')
@property
def gene_id(self):
result = self._fetch_field('gene_id')
return AnnotationItem(data=result, name='ANN')
@property
def feature_type(self):
result = self._fetch_field('feature_type')
return AnnotationItem(data=result, name='ANN')
@property
def feature_id(self):
result = self._fetch_field('feature_id')
return AnnotationItem(data=result, name='ANN')
@property
def transcript_biotype(self):
result = self._fetch_field('transcript_biotype')
return AnnotationItem(data=result, name='ANN')
@property
def exon_intron_rank(self):
result = self._fetch_field('exon_intron_rank')
return AnnotationItem(data=result, name='ANN')
@property
def nt_change(self):
result = self._fetch_field('nt_change')
return AnnotationItem(data=result, name='ANN')
@property
def aa_change(self):
result = self._fetch_field('aa_change')
return AnnotationItem(data=result, name='ANN')
@property
def cdna_pos(self):
result = self._fetch_field('cdna_pos')
return AnnotationItem(data=result, name='ANN')
@property
def protein_pos(self):
result = self._fetch_field('protein_position')
return AnnotationItem(data=result, name='ANN')
@property
def distance_to_feature(self):
result = self._fetch_field('distance_to_feature')
return AnnotationItem(data=result, name='ANN')
@property
def error(self):
result = self._fetch_field('error')
return AnnotationItem(data=result, name='ANN')
class VCF_DataFrame(DataFrame):
_metadata = ['samples', 'interval', 'chrom', 'start', 'end']
attrs = ['CHROM',
'POS',
'ID',
'REF',
'ALT',
'QUAL',
'FILTER',
'start',
'end',
'aaf',
'nucl_diversity',
'is_snp',
'is_indel',
'call_rate',
'num_called',
'num_het',
'num_hom_ref',
'num_hom_alt',
'ploidy',
'is_transition']
def __init__(self, *args, **kwargs):
super(VCF_DataFrame, self).__init__(*args, **kwargs)
self.messages = []  # collected log messages (appended to by subset_samples)
@property
def _constructor(self):
return VCF_DataFrame
@property
def _constructor_sliced(self):
return AnnotationSeries
@classmethod
def from_vcf(cls, filename, interval=None):
"""
Create a numpy-array VCF object.
filename:
Name of the VCF
interval:
An interval of the VCF to use (chrom:start-end)
"""
vcf = VCF(filename, gts012=True)
rows = []
for i, line in enumerate(vcf(interval)):
var_line = {}
var_line = {attr: getattr(line, attr) for attr in cls.attrs if hasattr(line, attr)}
# Currently string lists must be encoded using python.
var_line['FT'] = line.format("FT")
var_line['TGT'] = line.gt_bases
var_line['DP'] = line.format("DP").flatten().astype(np.int64)
var_line['GT'] = line.gt_types.astype(np.int64)
ANN = line.INFO.get("ANN")
if ANN:
var_line['ANN'] = [x.split("|") for x in ANN.split(",")]
rows.append(var_line)
dataset = DataFrame.from_dict(rows)
# Convert to categorical
dataset.REF = pd.Categorical(dataset.REF)
dataset.FILTER = pd.Categorical(dataset.FILTER)
# Add num missing column
dataset['num_missing'] = dataset.TGT.apply(lambda row: np.sum(np.isin(row, ['./.', '.|.'])))  # missing calls live in the TGT strings, not the numeric GT codes
# Use ordered CHROM
dataset.CHROM = pd.Categorical(dataset.CHROM,
ordered=True,
categories=vcf.seqnames)
dataset.REF = pd.Categorical(dataset.REF)
dataset.FILTER = pd.Categorical(dataset.FILTER)
# Add samples
dataset = VCF_DataFrame(dataset)
dataset.samples = np.array(vcf.samples)
if interval:
dataset.interval = interval
chrom, start, end = re.split(":|\-", interval)
dataset.chrom = chrom
dataset.start = int(start)
dataset.end = int(end)
dataset['allele_set'] = dataset.TGT.apply(lambda x: set([a for a in sum([re.split("\||\/", i) for i in x], []) if a != '.']))
return dataset
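# Usage sketch (added and hedged; the file name, interval and sample names below
# are placeholders, not values from this repository):
#   vcf_df = VCF_DataFrame.from_vcf("variants.vcf.gz", interval="II:1-100000")
#   vcf_df = vcf_df.subset_samples(["N2", "CB4856"])
#   summary_json = vcf_df.interval_summary()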
def _prune_non_snps(self):
"""
Remove sites that are monomorphic across the retained samples.
Also removes sites that are entirely missing.
"""
non_snps = self.GT.apply(lambda x: len(set(x[~np.isnan(x)])) > 1)
return self[non_snps]
def _prune_alleles(self):
"""
Remove ANN that are not present in the set of subset samples
"""
self['allele_set'] = self.TGT.apply(lambda x: set([a for a in sum([re.split("\||\/", i) for i in x], []) if a != '.']))
self[~self.ANN.isna()].ANN = self[~self.ANN.isna()].apply(lambda row: [i for i in row['ANN'] if i[0] in row.allele_set], axis=1)
return self
def subset_samples(self, samples, prune_non_snps=True, inplace=False):
"""
Subset samples
"""
sample_bool_keep = np.isin(self.samples, samples)
df = self.copy()
# Subset GT
df.GT = df.GT.apply(lambda row: row[sample_bool_keep])
df.TGT = df.TGT.apply(lambda row: row[sample_bool_keep])
df.DP = df.DP.apply(lambda row: row[sample_bool_keep])
df.FT = df.FT.apply(lambda row: row[sample_bool_keep])
# Update variables
df.num_hom_ref = df.GT.apply(lambda row: np.sum(row == 0))
df.num_het = df.GT.apply(lambda row: np.sum(row == 1))
df.num_hom_alt = df.GT.apply(lambda row: np.sum(row == 2))
df.num_missing = df.TGT.apply(lambda row: np.sum(np.isin(row, ['./.', '.|.'])))
df.missing_rate = df.num_missing
# Do not change '== False' to 'is False'; numpy needs the element-wise comparison here.
df.num_called = df.TGT.apply(lambda row: np.sum(np.isin(row, ['./.', '.|.']) == False))
df.call_rate = df.GT.apply(lambda row: np.sum(row != 3)/row.size)
if prune_non_snps and len(samples) > 1:
original_size = df.size
df = df._prune_non_snps()
pruned_snps = original_size - df.size
self.messages.append(f"Pruned SNPs: {pruned_snps}")
elif len(samples) == 1:
self.messages.append("Subsetting on one sample - not pruning monomorphic SNPs.")
# Update samples
df.samples = self.samples[np.isin(self.samples, samples)]
if inplace:
self.samples = df.samples
self = df
else:
return df
@staticmethod
def _parse_interval(interval):
"""
Parses an interval
"""
chrom, *pos = re.split(":|-", interval)
if len(pos) not in [0, 2]:
raise Exception("Invalid interval")
elif len(pos) == 2:
pos = list(map(int, pos))
return chrom, pos[0], pos[1]
return chrom, None, None
#def interval(self, interval):
# """
# Filters a VCF on an interval
# """
# chrom, start, end = self._parse_interval(interval)
# if chrom and start and end:
# query_string = f"CHROM == '{chrom}' & POS > {start} & POS < {end}"
# elif chrom:
# query_string = f"CHROM == '{chrom}'"
# return self.query(query_string)
def interval_summary(self, interval=None, deep=False):
"""
Generates a comprehensive interval summary
Args:
interval - Act on an interval
deep - add extra info
"""
if interval:
df = self.interval(interval)
else:
df = self
results = infinite_dict()
# Impact
impact = results['variants']['impact']
impact['total'] = Counter(sum(df.ANN.impact.dropna(), []))
impact['unique'] = Counter(sum(df.ANN.impact.dropna().apply(lambda x: list(set(x))), []))
# Summary
variants = results['variants']
variants['filters']['FILTER'] = Counter(df.FILTER.dropna())
FT_vals = np.concatenate(df.FT.values)
if deep:
# These operations take too long.
variants['filters']['FT']['combined'] = Counter(FT_vals)
variants['filters']['FT']['separate'] = Counter(np.concatenate(Series(FT_vals).apply(lambda x: x.split(";")).values))
# snp
variants['snp']['records'] = sum(df.is_snp)
variants['snp']['num_missing'] = sum(df[df.is_snp].num_missing)
variants['snp']['avg_call_rate'] = np.average(df[df.is_snp].call_rate)
variants['snp']['transition'] = sum(df[df.is_snp].is_transition)
variants['snp']['transversion'] = sum(df[df.is_snp].is_transition == False)
variants['snp']['num_hom_ref'] = sum(df[df.is_snp].num_hom_ref)
variants['snp']['num_het'] = sum(df[df.is_snp].num_het)
variants['snp']['num_hom_alt'] = sum(df[df.is_snp].num_hom_alt)
# indel
variants['indel']['records'] = sum(df.is_indel)
variants['indel']['num_missing'] = sum(df[df.is_indel].num_missing)
variants['indel']['avg_call_rate'] = np.average(df[df.is_indel].call_rate)
variants['indel']['transition'] = sum(df[df.is_indel].is_transition)
variants['indel']['transversion'] = sum(df[df.is_indel].is_transition == False)
variants['indel']['num_hom_ref'] = sum(df[df.is_indel].num_hom_ref)
variants['indel']['num_het'] = sum(df[df.is_indel].num_het)
variants['indel']['num_hom_alt'] = sum(df[df.is_indel].num_hom_alt)
# biotype summary
variants['biotype'] = Counter(sum(df.ANN.transcript_biotype.dropna().apply(lambda x: list(set(x))), []))
# By Gene
gene = results['gene']
# Gene count
gene['genes_w_variants'] =len(set(sum(df.ANN.gene_id.dropna().values, [])))
for impact in set(sum(df.ANN.impact.dropna().values, [])):
gene['impact'][impact] = list(set(sum(df[df.ANN.impact == impact].ANN.gene_id.dropna().values, [])))
for transcript_biotype in set(sum(df.ANN.transcript_biotype.dropna().values, [])):
gene['transcript_biotype'][transcript_biotype] = list(set(sum(df[df.ANN.transcript_biotype == transcript_biotype].ANN.gene_id.dropna().values, [])))
# Biotype+Impact counts
for impact in set(sum(df.ANN.impact.dropna().values, [])):
for transcript_biotype in set(sum(df.ANN.transcript_biotype.dropna().values, [])):
filter_crit = (df.ANN.impact == impact) & (df.ANN.transcript_biotype == transcript_biotype)
gene['impact-biotype'][impact][transcript_biotype] = list(set(sum(df[filter_crit].ANN.gene_id.dropna().values, [])))
# Genes
return json.dumps(results)
def interval_summary_table(self):
df = self
genes = pd.read_csv("genes.tsv.gz")
interval_genes = genes[(genes.chrom == df.chrom) & (genes.start > df.start) & (genes.end < df.end) ]
biotypes_set = list(set(sum(df.ANN.transcript_biotype.dropna().values, [])))
for biotype in biotypes_set:
df[biotype] = df.ANN.transcript_biotype == biotype
df['gene_id'] = df.ANN.gene_id.dropna().apply(lambda x: list(set(x))[0])
ALL_gene_count = interval_genes[['biotype', 'gene_id']].groupby(['biotype'], as_index=False) \
.agg(['count'])
ALL_gene_count = flatten_cols(ALL_gene_count).rename(index=str, columns={"gene_id_count": "gene_count"}) \
.reset_index()
GENE_count = df[biotypes_set + ['gene_id']].groupby(['gene_id']) \
.agg(['max']) \
.agg(['sum']) \
.transpose() \
.reset_index() \
.rename(index=str, columns={"sum": "genes_w_variants", "level_0": "biotype"}) \
.drop("level_1", axis=1)
LMH_set = []
for x in ["MODIFIER", "LOW", "MODERATE", "HIGH"]:
lmh_df = df[biotypes_set + ['gene_id']][df.ANN.impact == x].groupby(['gene_id']) \
.agg(['max']) \
.agg(['sum']) \
.transpose() \
.reset_index() \
.rename(index=str, columns={"sum": f"genes_w_{x}_variants", "level_0": "biotype"}) \
.drop("level_1", axis=1)
LMH_set.append(lmh_df)
VARIANT_count = df[biotypes_set].agg(['sum']) \
.transpose() \
.reset_index() \
.rename(index=str, columns={"sum": "variants", "index": "biotype"})
dfs = [ALL_gene_count, GENE_count] + LMH_set + [VARIANT_count]
merged = reduce(lambda left, right: pd.merge(left, right, how='outer', on='biotype'), dfs)
merged.iloc[:,1:] = merged.iloc[:,1:].fillna(0).astype(int)
merged['interval'] = df.interval
return merged.sort_values('variants', ascending=False)
@staticmethod
def _sub_values(row, find, replace):
"""
Substitute values in an array
"""
np.place(row, row == find, replace)
return row
def concordance(self):
"""
Calculate the concordance of genotypes across all samples.
Currently functions with ploidy == 1 or 2
A homozygous REF (e.g. AA) and heterozygous (AG) call
are treated as discordant.
"""
df = self
# Convert GT to float so nan values can be
# added.
df.GT = df.GT.apply(lambda row: row.astype(float)) \
.apply(lambda row: self._sub_values(row, 3.0, np.nan))
called_gtypes = sum(df.GT.apply(lambda row: np.isnan(row) == False))
# cf
cf = sum(df.GT.apply(lambda row: (row[:, None] == row)))
cf = DataFrame(cf, columns=df.samples, index=df.samples)
cf.index.name = "sample_a"
cf.columns.name = "sample_b"
cf = cf.stack()
cf = DataFrame(cf, columns=['concordant_gt']).reset_index()
n_called_a = pd.DataFrame(called_gtypes, columns=['gt_called_a'], index=df.samples)
n_called_b = pd.DataFrame(called_gtypes, columns=['gt_called_b'], index=df.samples)
n_called_a.index.name = 'sample_a'
n_called_b.index.name = 'sample_b'
cf = cf.join(n_called_a, on='sample_a').join(n_called_b, on='sample_b')
cf['minimum_gt'] = cf.apply(lambda row: min(row.gt_called_a, row.gt_called_b), axis=1)
cf['concordance'] = cf['concordant_gt'] / cf['minimum_gt']
return cf
def hard_filter(self):
"""
The hard filter method does two things:
(1) Removes all rows where FILTER != PASS
(a passing variant is represented as FILTER == None)
(2) Sets genotypes whose FT (genotype-level filter) is not PASS to NaN.
"""
df = self
df.GT = df.GT.apply(lambda row: row.astype(float)) \
.apply(lambda row: self._sub_values(row, 3.0, np.nan))
# Format genotypes and filters.
GT_filter = np.vstack(df.FT.apply(lambda row: row != "PASS").values)
GT_vals = np.vstack(df.GT.apply(lambda row: row.astype(float)).values)
# Apply nan filter to FT != PASS
GT_vals[GT_filter] = np.nan
# Re-integrate genotypes
df.GT = Series(list(GT_vals))
# FILTER columns
df = df[df.FILTER.isnull()]
return df
def to_fasta(self, filename=None):
"""
Generates a FASTA file
"""
df = self
for sample, row in zip(df.samples, np.vstack(df.TGT.values).T):
print(f">{sample}")
seq = Series(row).apply(lambda row: np.str.replace(row, "|", "/")) \
.apply(lambda row: np.str.split(row, "/")) \
.apply(lambda row: row[0] if len(set(row)) == 1 else "N")
print(''.join(seq.values).replace(".", "N"))
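# End-to-end sketch (added and hedged; the file name is a placeholder):
#   df = VCF_DataFrame.from_vcf("WI.vcf.gz", interval="I:1-50000")
#   df = df.hard_filter()            # drop FILTER != PASS rows, NaN genotypes failing FT
#   concordance_table = df.concordance()
#   df.to_fasta()                    # prints one pseudo-sequence per sample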
|
mit
|
Evidlo/redrum
|
setup.py
|
2
|
1130
|
from setuptools import setup
from redrum import version
import os
import shutil
module_path = os.path.dirname(os.path.realpath(__file__)) + '/redrum'
config_file = os.path.expanduser('~/.config/redrum.ini')
shutil.copyfile(module_path + '/redrum.ini', config_file)
setup(
name='redrum',
version=version.__version__,
packages=['redrum'],
author="Evan Widloski",
author_email="[email protected]",
description="uses math to select wallpapers from Reddit",
long_description=open('README.rst').read(),
license="MIT",
keywords="Reddit wallpaper changer",
url="https://github.com/evidlo/redrum",
data_files = [(os.path.dirname(config_file), ['redrum/redrum.ini'])],
entry_points={
'console_scripts': ['redrum = redrum.redrum:main',
'redrum_tune = redrum.tune_gui:main [tune]'
]
},
install_requires=[
"requests",
"configparser"
],
extras_require={
'tune': ['matplotlib', 'numpy']
},
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3"
]
)
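# Note (added): because of the extras_require entry above, the optional tuning GUI
# dependencies can be pulled in with the standard setuptools extras syntax, e.g.
#   pip install redrum[tune]
# while a plain `pip install redrum` only installs requests and configparser.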
|
mit
|
belltailjp/scikit-learn
|
examples/applications/plot_prediction_latency.py
|
234
|
11277
|
"""
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
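# Usage sketch (added, hedged): with any already-fitted regressor, e.g.
#   ridge = Ridge().fit(X_train, y_train)
#   atomic, bulk = benchmark_estimator(ridge, X_test, n_bulk_repeats=30)
# `atomic` holds one per-instance latency per test row, while `bulk` holds the
# per-instance latency averaged over each full-batch predict() call.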
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
pred_type : 'bulk' or 'atomic'
configuration : benchmark configuration dict (supplies the estimator names and complexities used as x labels)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : number of training instances (int)
n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
|
bsd-3-clause
|
WafaaT/spark-tk
|
regression-tests/sparktkregtests/testcases/frames/frame_sort_test.py
|
10
|
6152
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test interface functionality of frame.sort"""
import unittest
import sys
from sparktkregtests.lib import sparktk_test
class FrameSortTest(sparktk_test.SparkTKTestCase):
"""Test fixture the Frame sort function"""
def setUp(self):
super(FrameSortTest, self).setUp()
# create the standard test frame from the dogs dataset
dataset = self.get_file("dogs.csv")
schema = [("age", int),
("name", str),
("owner", str),
("weight", int),
("hair_type", str)]
# header=True means first line will be skipped
self.frame = self.context.frame.import_csv(dataset,
schema=schema, header=True)
def test_frame_sort_single_column_ascending(self):
""" Test single-column sorting ascending"""
# sort by weight, defaults to ascending
self.frame.sort("weight")
# get just the weight column
sorted = self.frame.copy("weight")
# take all rows and examine data
sorted_data = sorted.take(sys.maxint)
# last will keep track of the previous item
# we initialize it to an integer minimum
last = -1 * sys.maxint
# iterate through and assert that the last item
# is less than current while updating current
for i in range(len(sorted_data)):
assert sorted_data[i][0] >= last
last = sorted_data[i][0]
# instead of comparing just the weight column, this test makes sure
# row integrity is preserved across all columns, as a sanity check
# that the algorithm is not merely sorting the weight column but is
# in fact sorting all of the rows by the weight column value
def test_frame_sort_single_column_ascending_compare_all_cols(self):
""" Test single-column sorting ascending with the argument"""
frame_copy = self.frame.copy()
unsorted_data = frame_copy.take(frame_copy.count())
self.frame.sort("weight", ascending=True)
sorted = self.frame.copy()
sorted_data = sorted.take(sorted.count())
last = -1 * sys.maxint
for i in range(len(sorted_data)):
assert sorted_data[i][3] >= last
last = sorted_data[i][3]
# here we are making sure that the row integrity is
# preserved by checking that the entire row
# exists as is in the original data
if sorted_data[i] not in unsorted_data:
raise ValueError("integrity of row not preserved through sorting")
def test_frame_sort_single_column_descending(self):
""" Test single-column sorting descending with the argument"""
self.frame.sort("weight", ascending=False)
sorted = self.frame.copy("weight")
sorted_data = sorted.take(sys.maxint)
last = sys.maxint
for i in range(len(sorted_data)):
assert sorted_data[i][0] <= last
last = sorted_data[i][0]
def test_frame_sort_multiple_column_ascending(self):
""" Test multiple-column sorting ascending"""
unsorted = self.frame.to_pandas(self.frame.count())
self.frame.sort(["weight", "hair_type"])
up_take = self.frame.to_pandas(self.frame.count())
sorted_vals = unsorted.sort_values(['weight', 'hair_type'])
# compare the data we sorted with the sorted frame
for i in range(len(sorted_vals)):
self.assertEqual(
up_take.iloc[i]['weight'], sorted_vals.iloc[i]['weight'])
self.assertEqual(
up_take.iloc[i]['hair_type'], sorted_vals.iloc[i]['hair_type'])
def test_frame_sort_multiple_column_tuple_descending(self):
""" Test multiple-column sorting descending with the argument"""
self.frame.sort([("weight", False), ("hair_type", False)])
up_take = self.frame.to_pandas(self.frame.count())
sorted_vals = up_take.sort_values(['weight', 'hair_type'],
ascending=[False, False])
for i in range(len(sorted_vals)):
self.assertEqual(
up_take.iloc[i]['weight'], sorted_vals.iloc[i]['weight'])
self.assertEqual(
up_take.iloc[i]['hair_type'], sorted_vals.iloc[i]['hair_type'])
def test_frame_sort_multiple_column_mixed(self):
""" Test multiple-column sorting descending with the argument"""
self.frame.sort([("weight", False), ("hair_type", True), ('age', True)])
up_take = self.frame.to_pandas(self.frame.count())
sorted_vals = up_take.sort_values(
['weight', 'hair_type', 'age'], ascending=[False, True, True])
for i in range(len(sorted_vals)):
self.assertEqual(
up_take.iloc[i]['weight'], sorted_vals.iloc[i]['weight'])
self.assertEqual(
up_take.iloc[i]['hair_type'], sorted_vals.iloc[i]['hair_type'])
self.assertEqual(
up_take.iloc[i]['age'], sorted_vals.iloc[i]['age'])
def test_frame_sort_error_bad_column(self):
""" Test error on non-existant column"""
with self.assertRaisesRegexp(Exception, "Invalid column"):
self.frame.sort('no-such-column')
def test_invalid_arguments(self):
"""Test no arguments errors"""
with self.assertRaisesRegexp(Exception, "2 arguments"):
self.frame.sort()
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
janpipek/boadata
|
boadata/trees/excel.py
|
1
|
1464
|
from boadata.core import DataNode, DataTree
import pandas as pd
import os
import xlrd # This is used by pandas to import excel
class ExcelSheetNode(DataNode):
def __init__(self, xls, sheet_name, parent=None):
super(ExcelSheetNode, self).__init__(parent)
self.xls = xls
self.sheet_name = sheet_name
node_type = "Excel sheet"
@property
def title(self):
return self.sheet_name
@property
def data_object(self):
from boadata.data.excel_types import ExcelSheet
data = self.xls.parse(self.sheet_name)
return ExcelSheet(inner_data=data, uri=self.uri)
@DataTree.register_tree
class ExcelFile(DataTree):
def __init__(self, path, parent=None):
super(ExcelFile, self).__init__(parent)
self.path = path
self.xls = None # Load it lazily
node_type = "Excel"
def load_children(self):
if not self.xls:
self.xls = pd.ExcelFile(self.path)
for sheet_name in self.xls.sheet_names:
self.add_child(ExcelSheetNode(self.xls, sheet_name, self))
@classmethod
def accepts_uri(cls, uri):
if not uri or not os.path.isfile(uri):
return False
if os.path.splitext(uri)[1] in [".xls", ".xlsx", ".xlsm"]:
return True
return False
# mime_types = (
# "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
# "application/vnd.ms-excel"
# )
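# Usage sketch (added and hedged; "report.xlsx" is a placeholder path):
#   tree = ExcelFile("report.xlsx")
#   tree.load_children()   # lazily opens the workbook via pd.ExcelFile and adds one ExcelSheetNode per sheet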
|
mit
|
hmc-cs-rkretsch/Secondary-Protein-Structure
|
Lit/s2Dv2/s2D_class.py
|
1
|
85152
|
'''
Copyright (C) 2014 Pietro Sormanni
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
contacts: Pietro Sormanni
e-mail: [email protected]
Dept. of Chemistry
University of Cambridge
Cambridge, UK
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it
under the condition of preserving this text
'''
WEBSERVER_MODE=False
import sys,os
import numpy
import sysconfig
#from traceback import print_exc
import PyELM
import s2D
from traceback import print_exc
module_path=str(s2D.__file__)
module_path=os.path.abspath( module_path[:module_path.rfind('/')])
if module_path[-1]!='/' : module_path+='/'
default_c_compiler = sysconfig.get_config_vars('CC')[0]
if default_c_compiler==None : default_c_compiler='cc'
default_parser_executable=module_path+'chkparse'
if WEBSERVER_MODE :
sys.path=['/home/ps589/.local/lib/python2.7/site-packages/','/home/ps589/.local/lib/python2.7/site-packages/six-1.8.0-py2.7.egg', '/home/ps589/.local/lib/python2.7/site-packages/mock-1.0.1-py2.7.egg','/home/ps589/.local/lib/python2.7/site-packages/distribute-0.6.28-py2.7.egg','/home/ps589/.local/lib/python2.7/site-packages/nose-1.3.4-py2.7.egg','/home/ps589/.local/lib/python2.7/site-packages/matplotlib-1.4.1-py2.7-linux-x86_64.egg']+sys.path
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator,AutoMinorLocator
can_plot=True
else :
try : # check if plots can be generated
import matplotlib,distutils
matplot_vers= distutils.version.LooseVersion(matplotlib.__version__).version
if matplot_vers<[1,4] :
sys.stderr.write("*Warning*: your matplotilb version is %s but s2D requires >= 1.4 to produce plots.\n You can still run s2D to make predictions, but you won't be able to use it to make plots.\n" % (str(matplotlib.__version__)))
can_plot=False
else : can_plot=True
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator,AutoMinorLocator
plt.rcdefaults()
plt.rc('figure',facecolor='white')
except Exception :
#print_exc(file=sys.stderr)
sys.stderr.write("\n***WARNING*** matplotlib module not found. attempting to plot the results will generate ERRORS..\n\n")
sys.stderr.flush()
can_plot=False
pass
try : # get default parameters from file
#from pkg_resources import Requirement, resource_filename
#filename = resource_filename(Requirement.parse("s2D"),"s2D_parameters.txt")
#print filename
#foo_config = resource_string(__name__, 'foo.conf')
#if os.path.isfile(filename) : #
if os.path.isfile(module_path+'s2D_parameters.txt') :
parameter_filename=module_path+'s2D_parameters.txt'
#imported_parameter_class=s2D.s2D_parameters()
#imported_parameter_class.read('s2D_parameters.txt')
else :
parameter_filename=None
#imported_parameter_class=None
except Exception :
parameter_filename=None
#imported_parameter_class=None
#print_exc(file=sys.stderr)
pass
def convert_to_number(string, force_float=False):
'''
this function checks if a string is an int or a float and returns a tuple in the form
converted_string,bool. Bool is True if the string has been converted, False if it is still in string format.
the function is quite slow
'''
if force_float :
try :
return float(string),True
except ValueError :
return string,False
try :
return int(string),True
except ValueError :
try :
return float(string),True
except ValueError :
return string,False
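# Added examples (hedged) of the return convention described above:
#   convert_to_number("42")                    -> (42, True)
#   convert_to_number("3.5")                   -> (3.5, True)
#   convert_to_number("abc")                   -> ("abc", False)
#   convert_to_number("42", force_float=True)  -> (42.0, True)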
"""
probably to delete:
def get_numbers_from_string(string, force_float=False):
'''
from a string (like a file name or such) it extracts all the possible numbers and return them in a list
it does NOT process negative values (will be read as positives)
'''
candidates=[]
reading=False
for ch in string:
if ch.isdigit() or ch=='.' :
if reading :
candidates[-1]+=ch
else :
candidates+=[ch]
reading=True
else :
reading=False
numbers=[]
for ca in candidates :
if ca[-1]=='.' : ca=ca[:-1]
if ca[0]=='.' and len(ca)>1 : ca=ca[1:]
ca,ok=convert_to_number(ca, force_float=force_float)
if ok :
numbers+=[ca]
return numbers
"""
res_closness_blosum62 = ['V', 'I', 'M', 'L', 'F', 'W', 'Y', 'H', 'N', 'S', 'P', 'T', 'C', 'A', 'G', 'D', 'E', 'Q', 'K', 'R']
# convert residue to neurons without using blast
def residue_to_input(res, numb_neurones=1,out_in_range=(-10.,10.), use_rank=res_closness_blosum62,nres=20):
if res not in use_rank:
raise Exception('*ERROR* residue '+str(res)+' not recognized ')
return None
if numb_neurones==1 :
inp = out_in_range[0] + use_rank.index(res)*(out_in_range[1]-out_in_range[0])/(nres*1.)
return inp
else :
inp=[0]*nres
inp[ use_rank.index(res) ]=1
return inp
def linear_correction(out_from_networks,angular_coefficients=numpy.array([1.240177580,1.27546432,1.397699985]) , intercepts=numpy.array([-0.070890685,-0.0577194695,-0.198067935]) ):
'''
applies a linear correction. THIS IS NOT USED IN THE CURRENT VERSION
the default parameters implies an output of 3 number per position of the Seq network.
helix: y= 1.240177580 *x -0.070890685
beta : y= 1.27546432 *x -0.057719469498999997
coil : y= 1.397699985 *x -0.198067935
'''
return out_from_networks*angular_coefficients + intercepts
def constrain_in_range(numpy_array,Min=0., Max=1.,shift_by=0.01):
numpy_array[numpy_array<Min]=Min+shift_by # the addition of shift_by is crucial if you normalize after
numpy_array[numpy_array>Max]=Max-shift_by
return numpy_array
def normalize_numpy(numpy_array, scaling_factor=1., constrain_first=True,Min=0., Max=1.,shift_by=0.01):
'''
assumes that numpy_array is 2 dimensional, each row is the output of one residue
and each column represents a property. It normalizes the properties so that they sum to one
the input array is changed, no copy is made!
if constrain_first it first put the entries in 0 ,1
'''
if constrain_first : numpy_array=constrain_in_range(numpy_array,Min=Min, Max=Max,shift_by=shift_by)
den=numpy_array.sum(axis=1)/scaling_factor
for i in range(numpy_array.shape[1]) :
numpy_array[:,i]/=den
del den
return numpy_array
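# Added illustration (hedged): for a 2D array of per-residue populations such as
#   numpy.array([[0.5, 0.3, 0.4], [1.2, -0.1, 0.2]])
# normalize_numpy first clips every entry into [Min+shift_by, Max-shift_by] and then
# divides each row by its sum, so every row of the result sums to scaling_factor (1 by default).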
def linear_correct_and_normalize(out_from_networks):
'''
it first applies a linear correction and then normalizes the output
'''
return normalize_numpy( linear_correction(out_from_networks) , constrain_first=True)
"""
PLOT FUNCTION
these functions are used to produce a plot of the results. (Besides plot_s2D_results all others are more general and can be used for many purposes).
"""
class cycle_list(list):
def __init__(self,l=[]):
list.__init__(self,l)
def __getitem__(self,y) :
#x.__getitem__(y) <==> x[y]
if y>=len(self) :
y=y%len(self)
return list.__getitem__(self,y)
color_palette = cycle_list([(0.25098039215686274, 0.4470588235294118, 0.792156862745098), (0.4235294117647059, 0.6831372549019608, 0.24313725490196078), (0.8470588235294118, 0.7507843137254902, 0.16784313725490197), (0.8196078431372549, 0.5333333333333333, 0.15294117647058825), (0.7764705882352941, 0.29411764705882354, 0.10980392156862745), (0.4549019607843137, 0.3254901960784314, 0.6509803921568628)])
def plot_s2D_results(s2D_output,sequences=None,ss_kind_str=None,seq_names=None,title=None,frame=None, print_all_sequence=True,dont_plot_coil=True,plot_coil_as_bar=False,coil_index=2,coil_color=None,plot_legend=True,y_range=(0,1.00001),figure_and_axts_tuple=None,bar=True,dpi=300,max_res_per_plot=None,color_palette=color_palette \
,xlabel='Residue', ylabel='Secondary structure population',legend_location='upper right',start_rescount=1,y_major_tick_every=0.5,y_minor_tick_every=0.1,x_major_tick_every=None,x_minor_tick_every=None, legend_size=None,figure_size=None,save=True,show=False,**kwargs):
'''
plot the results in a graph, can read the output file produced by the s2D class.
out_tag not used yet
return fig,axt
'''
lw=0.25
if not bar : lw=3.
if type(sequences) is not list and type(sequences) is not tuple :
sequences=[sequences]
if type(seq_names) is not list and type(seq_names) is not tuple :
seq_names=[seq_names]
if type(s2D_output) is str :
seq_names,sequences,s2D_output,ss_kind_str = read_output(s2D_output)
elif type(s2D_output) is not list : # One list per sequence # and not ( hasattr(s2D_output,'shape') and len(s2D_output.shape)>1 ): # if it is not a list and not an (at least) bidimensional numpy array
s2D_output=[s2D_output]
save_plot=None
for j,out in enumerate(s2D_output) : # loop on sequences
#if sequences[j]==None :
# sequences[j]=' '*len(out)
legend=['Helix','Strand','Coil','Polyproline-II'][:len(out.T)]
if seq_names[j]==None : seq_names[j]=''
if title==None and seq_names[j]!=None and seq_names[j]!='': title ='s2D Prediction '+seq_names[j]
if dont_plot_coil :
cols=color_palette[:coil_index]+color_palette[coil_index+1:]
fig,axt=plot_seq_profile(sequences[j], list(out.T[:coil_index])+list(out.T[coil_index+1:]),annotation_string=ss_kind_str , bar=bar, start_rescount=start_rescount, xlabel=xlabel, ylabel=ylabel, title=title,frame=frame,y_range=y_range,zygg_like_lines=False, print_all_sequence=print_all_sequence, color=cols, max_res_per_plot=max_res_per_plot \
,y_major_tick_every=y_major_tick_every,y_minor_tick_every=y_minor_tick_every,x_major_tick_every=x_major_tick_every,x_minor_tick_every=x_minor_tick_every, show=False, linewidth=lw,figure_and_axts_tuple=figure_and_axts_tuple,figure_size=figure_size, save=None,**kwargs)
if plot_legend :
if bar : add_custom_legend(legend[:coil_index]+legend[coil_index+1:],facecolors=cols,edgecolors=None,figure=fig, legend_location=legend_location, legend_size=legend_size,linewidth=[lw]*len(cols), frame_facecolor=(1.,1.,1.,0.7))
else : add_custom_legend(legend[:coil_index]+legend[coil_index+1:],facecolors=None,edgecolors=cols,figure=fig, legend_location=legend_location, legend_size=legend_size,linewidth=[lw]*len(cols),frame_facecolor=(1.,1.,1.,0.7))
else :
if plot_coil_as_bar :
cols=color_palette[:len(out.T)]
fig,axt=plot_seq_profile(sequences[j], out.T,annotation_string=ss_kind_str , bar=plot_coil_as_bar, start_rescount=start_rescount, xlabel=xlabel, ylabel=ylabel, title=title,frame=frame,y_range=y_range,zygg_like_lines=False, print_all_sequence=print_all_sequence, color=cols,max_res_per_plot=max_res_per_plot ,y_major_tick_every=y_major_tick_every,y_minor_tick_every=y_minor_tick_every,x_major_tick_every=x_major_tick_every,x_minor_tick_every=x_minor_tick_every, show=False, linewidth=lw,figure_and_axts_tuple=figure_and_axts_tuple,figure_size=figure_size, save=None,**kwargs)
face_cols,edge_cols,lws = cols,[None]*len(cols),[lw]*len(cols)
else :
if 'Coil' in legend :
legend.remove('Coil')
legend+=['Coil']
if coil_color!=None :
coil_col=coil_color
cols=color_palette[:len(out.T)-1]
else :
coil_col=color_palette[coil_index]
cols=(color_palette[:coil_index]+color_palette[coil_index+1:])[:len(out.T)-1]
if bar : lw_coil=lw+2
else : lw_coil=lw
fig,axt=plot_seq_profile(sequences[j], list(out.T[:coil_index])+list(out.T[coil_index+1:]),annotation_string=ss_kind_str , bar=bar, start_rescount=start_rescount, xlabel=xlabel, ylabel=ylabel, title=title,frame=frame,y_range=y_range,zygg_like_lines=False, print_all_sequence=print_all_sequence, color=cols,max_res_per_plot=max_res_per_plot ,y_major_tick_every=y_major_tick_every,y_minor_tick_every=y_minor_tick_every,x_major_tick_every=x_major_tick_every,x_minor_tick_every=x_minor_tick_every, show=False, linewidth=lw,figure_and_axts_tuple=figure_and_axts_tuple,figure_size=figure_size, save=None,**kwargs)
fig,axt=plot_seq_profile(sequences[j], out.T[coil_index],annotation_string=ss_kind_str, bar=False, start_rescount=start_rescount, xlabel=xlabel, ylabel=ylabel, title=title,frame=frame,y_range=y_range,zygg_like_lines=False, print_all_sequence=print_all_sequence, color=coil_col,max_res_per_plot=max_res_per_plot, y_major_tick_every=y_major_tick_every,y_minor_tick_every=y_minor_tick_every,x_major_tick_every=x_major_tick_every,x_minor_tick_every=x_minor_tick_every, show=False, linewidth=lw_coil,figure_and_axts_tuple=(fig,axt),figure_size=figure_size, save=None,**kwargs)
if bar :
face_cols=cols+[None] #color_palette[:coil_index]+[None]+color_palette[coil_index+1:]
edge_cols,lws = [None]*(len(legend)-1)+[coil_col],[lw]*(len(legend)-1)+[lw_coil] #[ None if c!=None else color_palette[ji] for ji,c in enumerate(face_cols)], [lw if c!=None else lw_coil for ji,c in enumerate(face_cols) ]
#print len(cols),len(face_cols),face_cols
else : face_cols,edge_cols,lws=[None]*len(legend),cols+[coil_col],[lw]*len(legend)#[None]*len(legend) , color_palette[:len(legend)],[lw]*len(legend)
if plot_legend :
add_custom_legend(legend,facecolors=face_cols,edgecolors=edge_cols,figure=fig, legend_location=legend_location, legend_size=legend_size,linewidth=lws,frame_facecolor=(1.,1.,1.,0.7))
#cols=color_palette[:2]+color_palette[3:4]
#fig,axt=plot_seq_profile(sequences[j], out.T,annotation_string=ss_kind_str , bar=bar, start_rescount=start_rescount, xlabel=xlabel, ylabel=ylabel,frame=frame, title=title,y_range=y_range,zygg_like_lines=False, print_all_sequence=print_all_sequence, color=cols \
# ,y_major_tick_every=y_major_tick_every,y_minor_tick_every=y_minor_tick_every,x_major_tick_every=x_major_tick_every,x_minor_tick_every=x_minor_tick_every, show=False, linewidth=lw,figure_and_axts_tuple=figure_and_axts_tuple,figure_size=figure_size, save=None)
#if plot_legend :
# if bar : add_custom_legend(['Helix','Strand','Coil'],facecolors=cols,edgecolors=None,figure=fig, legend_location=legend_location, legend_size=legend_size)
# else : add_custom_legend(['Helix','Strand','Coil'],facecolors=None,edgecolors=cols,figure=fig, legend_location=legend_location, legend_size=legend_size)
if type(save) is str : save_plot=save
elif save==True : save_plot=seq_names[j].replace('|','_').replace(' ','_').replace('/','_').replace(':','_')+'s2D_plot.png'
if save!=None and save!=False:
if 'png' in save_plot : transparent=True
else : transparent=False
fig.savefig(save_plot, dpi=dpi,bbox_inches="tight",transparent=transparent)
if show :
plt.show(block=False)
return fig,axt
default_parameters = {
'hgrid':True,
'vgrid':True,
'frame':True,
'all_tight':False,
'seq_max_res_per_plot':200}
default_error_bars = {
'capsize':4,
'capthick':1.,
'elinewidth':1.}
text_sizes = {
'value_labels':18,
'xlabels':18,
'xlabels_many':'small',
'xlabel':22,
'ylabel':22,
'title':24,
'legend_size':12}
publication = {
'value_labels':22,
'xlabels':22,
'xlabels_many':15,
'xlabel':30,
'ylabel':30,
'title':30,
'legend_size':18}
# if you set the size for 'default' all the figures will come out of that size disregarding their type. Otherwise you can change the figure size for each type (key in dictionary)
default_figure_sizes={
'all_tight':False ,
'use_cm':False ,
'dpi':300,
'default':None ,
'sequence':(14.2,8)
}
def set_publish(all_same_figure_size=False, thick_ticks=True, axis_tickness=True, no_grids=True, text_sizes=text_sizes, publication=publication, default_figure_sizes=default_figure_sizes):
default_error_bars['capsize'] = 8
default_error_bars['capthick'] = 2
default_error_bars['elinewidth'] = 2
for k in publication:
text_sizes[k] = publication[k] # update the shared dict in place; rebinding with '=' or replacing it via .copy() would not propagate
default_parameters['seq_max_res_per_plot']=100
default_parameters['all_tight']=True
plt.rc('xtick', labelsize=text_sizes['xlabels'])
plt.rc('ytick', labelsize=text_sizes['xlabels'])
plt.rc('ytick.major', width=1.5, size=6)
plt.rc('ytick.minor', width=1., size=3)
plt.rc('xtick.major', width=1.5, size=6)
plt.rc('xtick.minor', width=1., size=3)
default_figure_sizes['all_tight'] = True
if no_grids:
for p in ['vgrid','hgrid'] :
default_parameters[p] = False
default_parameters['frame']=True # frame on for s2D
if thick_ticks :
plt.rc('ytick.major', width=2.5, size=10)
plt.rc('ytick.minor', width=2, size=6)
plt.rc('xtick.major', width=2.5, size=10)
plt.rc('xtick.minor', width=2, size=6)
if axis_tickness:
plt.rc('axes', linewidth=2, edgecolor='black')
if all_same_figure_size != False: # I could just change default_figure_sizes['default'] but this is safer
if type(all_same_figure_size) is tuple:
for s in default_figure_sizes:
if s == 'all_tight' or s == 'use_cm' or s == 'dpi':
continue
default_figure_sizes[s] = all_same_figure_size
else:
for s in default_figure_sizes:
if s == 'all_tight' or s == 'use_cm' or s == 'dpi':
continue
default_figure_sizes[s] = 10, 10
return
def plot_seq_profile(sequence, profile, annotation_string=None,use_subplot=True,bar=False,bar_sep=0.2,log_scale=False,avoid_scientific_notation=True,max_res_per_plot=None,stacked=False,start_rescount=1, label='',xlabel='Residue',ylabel='Score (a.u.)',title=None,zygg_like_lines=True,hline=0,vgrid=None,frame=None ,print_all_sequence=True,color=None, show=True\
,yerr=None,y_range=None,y_major_tick_every=None,y_minor_tick_every=None,x_major_tick_every=None,x_minor_tick_every=None,ls='-', linewidth=1,marker='',markerfacecolor=True,markeredgecolor=True, markersize=18,upper_label_rotation='horizontal',legend_location='upper right',legend_size=None,figure_size=None, figure_and_axts_tuple=None, save='') :
'''
figure_and_axts_tuple can be given to superimpose
'''
if frame==None : frame=default_parameters['frame']
#if hgrid==None : hgrid=default_parameters['hgrid']
if vgrid==None : vgrid=default_parameters['vgrid']
if max_res_per_plot==None : max_res_per_plot=default_parameters['seq_max_res_per_plot']
if figure_size==None :
if default_figure_sizes['default']==None :
figure_size = default_figure_sizes['sequence']
else : figure_size = default_figure_sizes['default']
if type(sequence) is str and len(sequence)>900 :
figure_size=(figure_size[0],min([20,int(len(sequence)/10)]))
#print 'DEB figure_size',figure_size
#if default_figure_sizes['use_cm'] : figure_size= ( cmToinch(figure_size[0]), cmToinch(figure_size[1]))
if legend_size==None : legend_size=text_sizes['legend_size']
plt.rc('xtick', labelsize=text_sizes['xlabels'])
plt.rc('ytick', labelsize=text_sizes['xlabels'])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
#if default_figure_sizes['all_tight'] :
# #if y_major_tick_every==None and x_major_tick_every==None :
# plt.locator_params(axis='both', tight=None, nbins=5)
if hasattr(profile[0],'__len__') :
ismulti=True
if label!='' and label!=None and type(label) is not list and type(label) is not tuple : label=len(profile)*[label]
if type(ls) is str or ls==None : ls=[ls]*len(profile)
if type(linewidth) is int or type(linewidth) is float or linewidth==None : linewidth=[linewidth]*len(profile)
lengths=[]
for p in profile :
if len(p) not in lengths : lengths+=[len(p)]
maxlength=max(lengths)
if len(lengths)>1 :
sys.stderr.write("**WARNING in plot_seq_profile() given profiles of different lengths (found %s). Appending zeros at end of shorter profiles!\n\n" % (str(lengths)) )
for j,p in enumerate(profile) :
if len(p)< maxlength :
profile[j]=list(p)+[0.]*(maxlength-len(p))
profile=numpy.array(profile)
if yerr!=None : yerr=numpy.array(yerr)
prof=profile[0]
if y_range == None :
Min,Max= profile.min(),profile.max()
while hasattr(Min,'__len__') : Min=min(Min)
while hasattr(Max,'__len__') : Max=max(Max)
else :
ismulti=False
prof=profile
if y_range == None :
Min=min(profile)
Max=max(profile)
if y_range == None :
ymin=int(Min -1.)
ymax=int(Max +1.)
else :
ymin,ymax=y_range
if sequence!=None and len(sequence)!=len(prof) :
sys.stderr.write('**WARNING** in plot_seq_profile() len(sequence)!=len(profile) %d!=%d\n' % (len(sequence),len(prof)))
#if ecolor==None : ecolor=color
if type(markeredgecolor) is bool and markeredgecolor==True : markeredgecolor=color
if type(markerfacecolor) is bool and markerfacecolor==True : markerfacecolor=color
if type(color) is list or isinstance(color,cycle_list) :
if type(markerfacecolor) is not list and not isinstance(markerfacecolor,cycle_list) : markerfacecolor= cycle_list([markerfacecolor]*len(color))
if type(markeredgecolor) is not list and not isinstance(markeredgecolor,cycle_list) : markeredgecolor= cycle_list([markeredgecolor]*len(color))
#if type(ecolor) is not list and not isinstance(ecolor,cycle_list) : ecolor=cycle_list([ecolor]*len(color))
if use_subplot :
if figure_and_axts_tuple!=None :
fig,axt = figure_and_axts_tuple
n_profs=len(axt)
do_tight=False
else :
if len(prof)%int(max_res_per_plot) > 0 : add=1
else : add=0.001 # in case of rounding errors from the float conversion
n_profs=max([1 , int(len(prof)/float(max_res_per_plot)+add)]) # up to 199 residues per plot
fig,axt = plt.subplots(n_profs, sharey=True,figsize=figure_size) # axt is a tuple of n_profs size Use , sharex=True to share x axis
if n_profs==1 : axt=(axt,)
do_tight=True
# determine the number of residues per subplot
res_per_plot=len(prof)/n_profs
rem=len(prof)%n_profs
pzise=[res_per_plot]*n_profs
j=0
while rem>0 :
pzise[j]+=1
rem -=1
j +=1
start=0
line_styles=[]
for j,nres in enumerate(pzise) :
xlim_m=start+start_rescount-0.5
xlim_M=start+nres+start_rescount-0.5
if default_figure_sizes['all_tight'] :
if x_minor_tick_every==None : axt[j].xaxis.set_minor_locator(AutoMinorLocator(n=2))
if y_minor_tick_every==None : axt[j].yaxis.set_minor_locator(AutoMinorLocator(n=2))
if ismulti :
to_plot=profile[:,start:start+nres]
if yerr is not None : pl_yerr=[ a[start:start+nres] if a is not None else None for a in yerr ]
else : pl_yerr=[None]*len(to_plot)
else :
to_plot=profile[start:start+nres]
if yerr is not None : pl_yerr=yerr[start:start+nres]
else : pl_yerr=None
#if log_scale :
# if j==0 and y_range!=None : print "WARNING log_scale is overwriting y_range"
# _,to_plot,y_range =logscale(axt[j], entries=to_plot,add_one=True,add_zero=True)
if bar :
if ismulti :
if stacked :
bottom=numpy.zeros(nres)
sep,bar_width = bar_sep, (1.-bar_sep)
left=numpy.array(range(start+start_rescount,start+nres+start_rescount))-0.5+sep/2.
else :
sep, bar_width= float(bar_sep)/(len(profile)+1), (1.-bar_sep)/len(profile)# +1 is there so that bar groups will be separated by 2*sep
bottom=None
left = numpy.array(range(start+start_rescount,start+nres+start_rescount))-0.5+sep
for i,prof in enumerate(to_plot) :
if type(color) is list or isinstance(color,cycle_list) : l= axt[j].bar(left, prof,bottom=bottom,yerr=pl_yerr[i] , width=bar_width,linewidth=linewidth[i],color=color[i])
else : l=axt[j].bar(left, prof, bottom=bottom, width=bar_width,yerr=pl_yerr[i], linewidth=linewidth[i],color=color)
if stacked : bottom+=numpy.array(prof)
else : left+=sep+bar_width
if start==0 : line_styles+=[l]
del l
else :
sep,bar_width = bar_sep, (1.-bar_sep)
left=numpy.array(range(start+start_rescount,start+nres+start_rescount))-0.5+sep/2.
l=axt[j].bar(left,to_plot,width=bar_width,yerr=pl_yerr, linewidth=linewidth,color=color)
if start==0 : line_styles+=[l]
del l
else :
if ismulti :
x_pos=range(start+start_rescount,start+nres+start_rescount)
for i,prof in enumerate(to_plot) :
if color==None : l=axt[j].errorbar(x_pos, prof, linewidth=linewidth[i],yerr=pl_yerr[i],ls=ls[i],elinewidth=default_error_bars['elinewidth'], capsize=default_error_bars['capsize'], capthick=default_error_bars['capthick']\
, marker=marker,markersize=markersize,markeredgecolor=markeredgecolor,markerfacecolor=markerfacecolor)
elif type(color) is list or isinstance(color,cycle_list) :
l=axt[j].errorbar(x_pos, prof,yerr=pl_yerr[i], linewidth=linewidth[i],ls=ls[i],color=color[i],elinewidth=default_error_bars['elinewidth'], capsize=default_error_bars['capsize'], capthick=default_error_bars['capthick']\
, marker=marker,markersize=markersize,markeredgecolor=markeredgecolor[i],markerfacecolor=markerfacecolor[i])
else : l=axt[j].errorbar(x_pos, prof, linewidth=linewidth[i],yerr=pl_yerr[i],ls=ls[i],color=color,elinewidth=default_error_bars['elinewidth'], capsize=default_error_bars['capsize'], capthick=default_error_bars['capthick']\
, marker=marker,markersize=markersize,markeredgecolor=markeredgecolor,markerfacecolor=markerfacecolor)
if start==0 : line_styles+=[l]
del l
else :
if color==None :l=axt[j].errorbar(range(start+start_rescount,start+nres+start_rescount),to_plot, linewidth=linewidth,yerr=pl_yerr,ls=ls,elinewidth=default_error_bars['elinewidth'], capsize=default_error_bars['capsize'], capthick=default_error_bars['capthick'], marker=marker,markersize=markersize,markeredgecolor=markeredgecolor,markerfacecolor=markerfacecolor)
else :l=axt[j].errorbar(range(start+start_rescount,start+nres+start_rescount),to_plot, linewidth=linewidth,yerr=pl_yerr,ls=ls,color=color,elinewidth=default_error_bars['elinewidth'], capsize=default_error_bars['capsize'], capthick=default_error_bars['capthick'], marker=marker,markersize=markersize,markeredgecolor=markeredgecolor,markerfacecolor=markerfacecolor)
if start==0 : line_styles+=[l]
del l
#axt[j].set_xlim(start+start_rescount,start+nres+start_rescount-1)
if hline!=None :
if type(hline) is not list and type(hline) is not tuple : hline=[hline]
for ypos in hline :
axt[j].axhline(ypos,color='black',ls='-') # thick solid line, it represents the axis
if zygg_like_lines!=False :
if type(zygg_like_lines) is not list and type(zygg_like_lines) is not tuple :
zygg_like_lines=(-1,1)
for ypos in zygg_like_lines :
axt[j].axhline(ypos,color='black',ls='--',lw=0.5) # thin dashed reference lines at the given y positions
if log_scale :
axt[j].set_yscale('symlog',basey=10)
if avoid_scientific_notation:
yticks=axt[j].yaxis.get_majorticklocs()
xlab=[ 10**i for i in xrange(len(yticks))]
axt[j].set_yticklabels(xlab,rotation='horizontal',verticalalignment='center',horizontalalignment='right',fontsize=text_sizes['xlabels'])
if y_minor_tick_every!=None :
yminorLocator = MultipleLocator(y_minor_tick_every)
#for the minor ticks, use no labels; default NullFormatter
axt[j].yaxis.set_minor_locator(yminorLocator)
if y_major_tick_every!=None :
ymajorLocator = MultipleLocator(y_major_tick_every)
axt[j].yaxis.set_major_locator(ymajorLocator)
if x_minor_tick_every!=None :
xminorLocator = MultipleLocator(x_minor_tick_every)
#for the minor ticks, use no labels; default NullFormatter
axt[j].xaxis.set_minor_locator(xminorLocator)
if x_major_tick_every!=None :
xmajorLocator = MultipleLocator(x_major_tick_every)
axt[j].xaxis.set_major_locator(xmajorLocator)
#axt[j].set_ylim(ymin, ymax )
xticks=axt[j].xaxis.get_majorticklocs()
xticks=map(float,xticks)
sp=1.*nres
to_remove=[]
for x in xticks :
if abs((x-start+start_rescount)/sp)<0.1 : to_remove.append(x)
elif abs((start+nres+start_rescount-1-x)/sp)<0.1 : to_remove.append(x)
for x in to_remove :
xticks.remove(x)
xticks+=[start+start_rescount,start+nres+start_rescount-1]
axt[j].set_xticks(xticks)
axt[j].set_xlim(xlim_m,xlim_M)
#handle_grid( axt[j] , vgrid=False , hgrid=hgrid ) # custom vgrid for this plot
if vgrid :
if type(vgrid) is list or type(vgrid) is tuple :
for vl in vgrid : axt[j].axvline(vl,color='grey',ls=':')
else :
for count in range(start+start_rescount,start+nres+start_rescount) :
if type(vgrid) is int and count%vgrid==0:
axt[j].axvline(count,color='grey',ls=':')
elif count%10==0:
axt[j].axvline(count,color='grey',ls=':')
# axt[j].annotate(sequence[count-1], xy=(count,ymin), xytext=(0, -5),rotation=rotation, textcoords='offset points', va='top', ha='center',size='small')
ax2=None
if ( sequence!=None or annotation_string!=None ) and frame!=False:
ax2=axt[j].twiny()
ax2.set_xlim(axt[j].get_xlim())
if print_all_sequence==True : ju=1
elif type(print_all_sequence) is int : ju=print_all_sequence
else : ju=3
ax2.set_xticks(range(start+start_rescount,start+nres+start_rescount),minor=True) #
ax2.set_xticks(range(start+start_rescount,start+nres+start_rescount,ju))
ax2_ticks=ax2.get_xticks()
if sequence!=None :
ax2.set_xticklabels(sequence[start:start+nres:ju],rotation=upper_label_rotation,verticalalignment='bottom',fontsize=text_sizes['xlabels_many'])
#an=list(sequence[start:start+nres:ju])
#for ja,xt in enumerate(ax2_ticks[::ju]) :
# axt[j].annotate( an[ja], (xt,ymax),(0,5), xycoords='data' \
# , size=text_sizes['xlabels_many'],textcoords = 'offset points', ha = 'center', va = 'bottom' )
if annotation_string!=None :
an=list(annotation_string[start:start+nres:ju])
if len(an)>=len(ax2_ticks) :
for ja,xt in enumerate(ax2_ticks) :
axt[j].annotate( an[ja], (xt,ymax),(0,-5), xycoords='data' \
, size=text_sizes['xlabels_many'],textcoords = 'offset points', ha = 'center', va = 'top' )
else :
sys.stderr.write("Warn in plot_seq_profile. Not plotting annotation_string as length of processed str is larger than number of ticks on top axis. [%d %d]\n" % (len(an),len(ax2_ticks)))
if not frame :
# this remove top and right axis
print 'Removing frame'
axt[j].spines["right"].set_visible(False)
axt[j].spines["top"].set_visible(False)
axt[j].get_xaxis().tick_bottom() # ticks only on bottom axis
axt[j].get_yaxis().tick_left() # ticks only on left axis
if ax2!=None :
ax2.spines["right"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax2.get_xaxis().tick_bottom() # ticks only on bottom axis
ax2.get_yaxis().tick_left() # ticks only on left axis
start=start+nres
#yticks=axt[0].yaxis.get_majorticklocs() # the y axis is shared
#yticks=map(float,yticks)
#yticks.remove(min(yticks))
#axt[0].set_yticks(yticks)
axt[0].set_ylim(ymin, ymax )
if xlabel!=None : fig.text(0.5, 0.03, xlabel,fontsize=text_sizes['xlabel'], ha='center', va='center')
if ylabel!=None :fig.text(0.015, 0.5, ylabel,fontsize=text_sizes['ylabel'],rotation='vertical', ha='center', va='center')
if title!=None : fig.text(0.5, 0.97, title,horizontalalignment='center',fontsize=text_sizes['title'])
if label!='' and label!=None :
if type(label) is not list and type(label) is not tuple : label=[label]
legend=fig.legend(line_styles, label, loc=legend_location,prop={'size':legend_size},frameon=True,framealpha=0.5)
legendframe = legend.get_frame()
legendframe.set_facecolor((1.,1.,1.,0.7))
if do_tight : fig.tight_layout(pad=3.5,h_pad=1.08,w_pad=1.08,rect=(0, 0, 1, 1))
#if default_figure_sizes['all_tight'] : figure.tight_layout()
else :
raise Exception("Not implemented, set use_subplot to True")
plt.draw()
if save!=None and save!='' :
if '.' not in save : save+='.pdf'
fig.savefig(save, dpi=default_figure_sizes['dpi'],bbox_inches="tight",transparent=True) # bbox_inches=0 remove white space around the figure.. ,
if show :
plt.show(block=False)
return fig,axt
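# Minimal usage sketch for plot_seq_profile, added for illustration only: the sequence,
# the profile values and the output file name 'example_profile.pdf' are made-up placeholders,
# and matplotlib is assumed to be available as imported at the top of this module.
def _example_plot_seq_profile():
    seq = 'MKTAYIAKQR'                                  # hypothetical 10-residue sequence
    prof = [0.1, 0.4, -0.2, 0.8, 0.3, -0.5, 0.0, 0.6, 0.2, -0.1]
    # single profile drawn as bars, saved to pdf without opening an interactive window
    fig, axt = plot_seq_profile(seq, prof, bar=True, ylabel='Score (a.u.)',
                                title='example profile', show=False, save='example_profile.pdf')
    return fig, axt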
def add_custom_legend(labels,facecolors,edgecolors=None,marker_types=None,markersize=None,linewidth=None,figure=None,proxy_point=1,frame=True, legend_location='upper right', legend_size=None,frame_facecolor=None,shadow=False,framealpha=None):
'''
draws a custom legend on the current figure (or on figure, if given).
to represent a line for label j set facecolors[j]=None and give the desired edgecolor;
to represent a marker set marker_types[j] to a marker symbol (e.g. 'o').
frame_facecolor=(1.,1.,1.,0.7) gives an alpha of 0.7 to a white legend background
'''
if legend_size==None : legend_size=text_sizes['legend_size']
proxy_point=int(proxy_point)
if type(labels) is not list : labels=[labels]
if type(facecolors) is not list : facecolors=[facecolors]*len(labels)
if type(edgecolors) is not list :
if edgecolors==None : edgecolors=[None]*len(labels)
else : edgecolors=[edgecolors]*len(labels)
if type(marker_types) is not list :
if marker_types==None : marker_types=[None]*len(labels)
else : marker_types=[marker_types]*len(labels)
if type(markersize) is not list :
markersize=[markersize]*len(labels)
if type(linewidth) is not list :
if linewidth==None : linewidth=[None]*len(labels)
else : linewidth=[linewidth]*len(labels)
proxy_artists=[]
for j in xrange(len(labels)) :
if marker_types[j]!=None :
pro = plt.Line2D(range(proxy_point), range(proxy_point), color="white", marker=marker_types[j],markersize=markersize[j], markerfacecolor=facecolors[j],markeredgecolor=edgecolors[j],linewidth=linewidth[j])
elif facecolors[j]==None :
pro = plt.hlines(y=proxy_point, xmin=proxy_point, xmax=proxy_point, color=edgecolors[j],linewidth=linewidth[j])
else :
pro = plt.Rectangle((proxy_point, proxy_point), 0, 0, facecolor=facecolors[j],edgecolor=edgecolors[j],linewidth=linewidth[j]) #,linewidth=2)
proxy_artists+=[pro]
if figure!=None :
if type(legend_location) is tuple :leg=figure.legend( proxy_artists , labels,frameon=frame,numpoints=1, bbox_to_anchor=legend_location,prop={'size':legend_size}, shadow=shadow,framealpha=framealpha)
else :leg=figure.legend( proxy_artists , labels,frameon=frame,numpoints=1, loc=legend_location, prop={'size':legend_size}, shadow=shadow,framealpha=framealpha)
else :
if type(legend_location) is tuple :leg=plt.legend( proxy_artists, labels,frameon=frame,numpoints=1, bbox_to_anchor=legend_location, prop={'size':legend_size}, shadow=shadow,framealpha=framealpha)
else :leg=plt.legend( proxy_artists , labels,frameon=frame,numpoints=1, loc=legend_location,prop={'size':legend_size}, shadow=shadow,framealpha=framealpha)
if frame_facecolor!=None :
#leg=fig.legend(line_styles, label, loc=legend_location,prop={'size':legend_size},frameon=True,framealpha=0.5)
legendframe = leg.get_frame()
legendframe.set_facecolor(frame_facecolor)
plt.draw()
return leg
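# Illustrative sketch of add_custom_legend, showing one filled patch, one plain line and one
# marker entry; the labels and colours are arbitrary examples, not values used elsewhere here.
def _example_add_custom_legend():
    leg = add_custom_legend(labels=['region', 'baseline', 'outliers'],
                            facecolors=['lightblue', None, 'red'],
                            edgecolors=['blue', 'black', 'red'],
                            marker_types=[None, None, 'o'],
                            markersize=[None, None, 8],
                            legend_location='upper left')
    return leg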
def s2D_profile_to_color(output_matrices,color_rgb_list=[(0,0,1),(0,1,0),(1,0,0),(1.0, 1.0, 0.0)],add_PPII_to_coil=False):
try :
import chroma
except ImportError :
class chroma :
@staticmethod
def Color(*args,**kwargs):
raise Exception("chroma MODULE NOT AVAILABLE. Cannot run s2D_profile_to_color\n in terminal try running 'pip install chroma'\n")
if hasattr(output_matrices, 'shape') : # only one sequence
output_matrices=[output_matrices] # mimic output from file with more sequences
elif type(output_matrices) is str :
seq_names, sequences, output_matrices, annotation_str =read_output(output_matrices, add_PPII_to_coil=add_PPII_to_coil)
starting_cols=[ chroma.Color(c,format='RGB') for c in color_rgb_list]
#print starting_cols
out_color_list=[]
for i,mat in enumerate(output_matrices) :
out_color_list+=[[]]
for res_out in mat :
rc=None
for j,x in enumerate(res_out) :
nc=list(starting_cols[j].hsv)
nc[1]=x
#print nc,starting_cols[j].hsv,res_out
if not hasattr(rc, 'rgb') : rc=chroma.Color( nc, format='HSV')
else : rc -= chroma.Color( nc, format='HSV')
out_color_list[-1]+=[rc.rgb]
return out_color_list
def read_output(filename,force_float=True, add_PPII_to_coil=False, verbose=True):
'''
reads an output file of the s2D class
return seq_names,sequences,output_matrices,annotation_str
annotation_str will contain the content of eventual str columns (such the ss kind for s2D)
output_matrices is a list of numpy 3D arrays with all the float output, one column per neuron (secondary structure type).
one array per sequence.
'''
#content=file(filename).read().splitlines()
sequences=[]
seq_names=[]
annotation_str=[]
output_matrices=[ ]
for line in open(filename) :
if len(line)<1 : continue
if line[0]=='>' :
seq_names.append( line[1:].strip() )
sequences.append('')
annotation_str.append('')
output_matrices.append( [] )
elif line[0]!='#' :
if sequences==[] : # probably single-sequence output file
sequences.append('')
annotation_str.append('')
output_matrices.append( [] )
if verbose : print "s2D_class.read_output() -> Reading %s as a single-sequence output file!"
line=line.split()
if line!=[] :
sequences[-1]+=line[1]
output=[]
ann=0
for el in line[2:] :
el,isnumber = convert_to_number(el, force_float=force_float)
if isnumber :
output.append(el)
elif ann==0 :
annotation_str[-1]+=el
ann+=1
else :
print '**WARNING** s2D_class.read_output() Too many annotation strings in line %s' % (str(line))
output_matrices[-1].append(output)
for j,out in enumerate(output_matrices) :
if add_PPII_to_coil and len(out[0])>3 : output_matrices[j]=numpy.hstack( ( numpy.array(out)[:,:2], numpy.array(out)[:,2:].sum(axis=1)[:,numpy.newaxis ] ) )
else : output_matrices[j]=numpy.array(out)
return seq_names,sequences,output_matrices,annotation_str
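# Minimal sketch of reading an s2D output file back in; 'predictions.s2D.out' is a
# hypothetical path to a file written in the format described in the docstring above.
def _example_read_output():
    seq_names, sequences, output_matrices, annotation_str = read_output('predictions.s2D.out')
    first = output_matrices[0]   # one row per residue, one column per secondary-structure class
    return sequences[0], first.shape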
def average_multinetwork_prediction(list_of_network, seq_input_vec,postprocess=None):
'''
designed to average the results of multiple Seq networks, e.g. when
different sliding window sizes are employed (or any other difference in the network parameters)
'''
n=float(len(list_of_network))
Pred=list_of_network[0].predict(seq_input_vec)
for net in list_of_network[1:] :
Pred += net.predict(seq_input_vec) # sum and average
Pred/=n
if postprocess!=None and hasattr(postprocess, '__call__') :
Pred=postprocess(Pred)
return Pred
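# Sketch of averaging several saved networks on the same input; net_files and inp are
# placeholders, and PyELM / normalize_numpy are assumed to be available at module level
# as they are used elsewhere in this file.
def _example_average_multinetwork_prediction(net_files, inp):
    nets = [PyELM.loadModel(f, verbose=False) for f in net_files]
    return average_multinetwork_prediction(nets, inp, postprocess=normalize_numpy)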
def run_net(net,input_vec, postprocess=None):
#print input_vec.shape
if len(input_vec)==0 :
raise Exception("**ERROR** can't perform prediction, EMPTY input vector")
Pred=net.predict(input_vec)
if postprocess!=None :
Pred=postprocess(Pred)
return Pred
"""
BLAST FUNCTIONS
"""
chkparseC="""
/* On the first usage this gets written to a .c file which is automatically compiled */
/* content of the chkparse C script used to make the checkpoint file from psiblast more user friendly */
/* chkparse - generate PSIPRED compatible mtx file from BLAST+ checkpoint file */
/* V0.3 */
/* Copyright (C) 2010 D.T. Jones */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <string.h>
#define MAXSEQLEN 65536
#define EPSILON 1e-6
#define FALSE 0
#define TRUE 1
#define SQR(x) ((x)*(x))
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
const char *ncbicodes = "*A*CDEFGHIKLMNPQRSTVWXY*****";
/* BLOSUM 62 */
const short aamat[23][23] =
{
{4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1},
{0, -3, -3, -3,10, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1},
{0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2},
{1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0},
{0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1},
{0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1},
{0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, 4}
};
/* Standard BLAST+ a.a. frequencies */
float aafreq[26] =
{
0.00000, 0.07805, 0.00000, 0.01925, 0.05364, 0.06295, 0.03856, 0.07377, 0.02199, 0.05142, 0.05744, 0.09019,
0.02243, 0.04487, 0.05203, 0.04264, 0.05129, 0.07120, 0.05841, 0.06441, 0.01330, 0.00000, 0.03216, 0.00000,
0.00000, 0.00000
};
/* PSSM arrays */
float fratio[MAXSEQLEN][28], pssm[MAXSEQLEN][28];
/* Dump a rude message to standard error and exit */
void
fail(char *errstr)
{
fprintf(stderr, "\\n*** %s\\n\\n", errstr);
exit(-1);
}
/* Convert AA letter to numeric code (0-22 in 3-letter code order) */
int aanum(int ch)
{
static const int aacvs[] =
{
999, 0, 20, 4, 3, 6, 13, 7, 8, 9, 22, 11, 10, 12, 2,
22, 14, 5, 1, 15, 16, 22, 19, 17, 22, 18, 21
};
return (isalpha(ch) ? aacvs[ch & 31] : 22);
}
/* Scan ahead for file tokens */
void findtoken(char *buf, char *token, FILE *ifp)
{
for (;;)
{
if (fscanf(ifp, "%s", buf) != 1)
fail("Cannot find token in checkpoint file!");
if (!token[0] || !strcmp(buf, token))
break;
}
}
/* Read hex sequence string */
int readhex(char *seq, FILE *ifp)
{
int ch, aa, nres=0;
while ((ch = fgetc(ifp)) != EOF)
if (ch == '\\'')
break;
if (ch == EOF)
fail("Bad sequence record in checkpoint file!");
for (;;)
{
ch = fgetc(ifp);
if (ch == '\\'')
break;
if (isspace(ch))
continue;
if (!isxdigit(ch))
fail("Bad sequence record in checkpoint file!");
if (ch >= 'A')
aa = 16 * (10 + ch - 'A');
else
aa = 16 * (ch - '0');
ch = fgetc(ifp);
if (!isxdigit(ch))
fail("Bad sequence record in checkpoint file!");
if (ch >= 'A')
aa += 10 + ch - 'A';
else
aa += ch - '0';
if (nres > MAXSEQLEN)
break;
seq[nres++] = aa;
}
return nres;
}
/* This routine will extract PSSM data from a BLAST+ checkpoint file */
int getpssm(char *dseq, FILE *ifp)
{
int i, j, len;
float pssmrow[28], val, base, power;
char buf[4096];
findtoken(buf, "", ifp);
if (strcmp(buf, "PssmWithParameters"))
fail("Unknown checkpoint file format!");
findtoken(buf, "numColumns", ifp);
if (fscanf(ifp, "%d", &len) != 1)
fail("Unknown checkpoint file format!");
findtoken(buf, "ncbistdaa", ifp);
if (len != readhex(dseq, ifp))
fail("Mismatching sequence length in checkpoint file!");
findtoken(buf, "freqRatios", ifp);
findtoken(buf, "", ifp);
for (i=0; i<len; i++)
for (j=0; j<28; j++)
{
findtoken(buf, "", ifp);
findtoken(buf, "", ifp);
if (sscanf(buf, "%f", &val) != 1)
fail("Unknown checkpoint file format!");
findtoken(buf, "", ifp);
if (sscanf(buf, "%f", &base) != 1)
fail("Unknown checkpoint file format!");
findtoken(buf, "", ifp);
if (sscanf(buf, "%f", &power) != 1)
fail("Unknown checkpoint file format!");
findtoken(buf, "", ifp);
fratio[i][j] = val * pow(base, power);
}
findtoken(buf, "scores", ifp);
findtoken(buf, "", ifp);
for (i=0; i<len; i++)
for (j=0; j<28; j++)
{
findtoken(buf, "", ifp);
if (sscanf(buf, "%f", &val) != 1)
fail("Unknown checkpoint file format!");
pssm[i][j] = val;
}
return len;
}
int roundint(double x)
{
x += (x >= 0.0 ? 0.5 : -0.5);
return (int)x;
}
int main(int argc, char **argv)
{
int i, j, seqlen=0, nf;
char seq[MAXSEQLEN];
double scale, x, y, sxx, sxy;
FILE *ifp;
int use_psipred_format=0;
if (argc != 2)
fail("Usage: chkparse chk-file");
ifp = fopen(argv[1], "r");
if (!ifp)
fail("Unable to open checkpoint file!");
seqlen = getpssm(seq, ifp); // read the sequence from input file and save its length
if (seqlen < 5 || seqlen >= MAXSEQLEN)
fail("Sequence length error!");
/* Estimate original scaling factor by weighted least squares regression */
for (sxx=sxy=i=0; i<seqlen; i++)
for (j=0; j<26; j++)
if (fratio[i][j] > EPSILON && aafreq[j] > EPSILON)
{
x = log(fratio[i][j] / aafreq[j]);
y = pssm[i][j];
sxx += (y*y) * x * x; /* Weight by y^2 */
sxy += (y*y) * x * y;
}
scale = 100.0 * sxy / sxx;
if(use_psipred_format)
{
printf("%d\\n", seqlen); // print sequence length
for (i=0; i<seqlen; i++) // print actual sequence
putchar(ncbicodes[seq[i]]);
printf("\\n0\\n0\\n0\\n0\\n0\\n0\\n0\\n0\\n0\\n0\\n0\\n0\\n");
for (i=0; i<seqlen; i++)
{
for (j=0; j<28; j++)
if (ncbicodes[j] != '*')
{
if (fratio[i][j] > EPSILON)
printf("%d ", roundint(scale * log(fratio[i][j] / aafreq[j])));
else
printf("%d ", 100*aamat[aanum(ncbicodes[seq[i]])][aanum(ncbicodes[j])]);
}
else
printf("-32768 ");
putchar('\\n');
}
}else
{
//print header
printf(" ");
for (j=0; j<28; j++)
if (ncbicodes[j] != '*')
printf(" %c ",ncbicodes[j]);
putchar('\\n');
for (i=0; i<seqlen; i++)
{
printf("%5d %c ",i+1,ncbicodes[seq[i]]);
for (j=0; j<28; j++)
if (ncbicodes[j] != '*')
{
if (fratio[i][j] > EPSILON)
printf(" %5.2lf ", roundint(scale * log(fratio[i][j] / aafreq[j]))*0.01 );
else
printf(" %5.2lf ", 1.*aamat[aanum(ncbicodes[seq[i]])][aanum(ncbicodes[j])]);
}
putchar('\\n');
}
}
return 0;
}
"""
def psiblast_checkpoint(sequence, psi_blast_database_no_last_extension, BLAST_PATH='',sequence_name='to_blast',BLASTFILE=None,parser_executable=default_parser_executable, c_compiler=default_c_compiler,temporary_file_directory='',num_iterations=3,ncpu=2,str_content_of_chkparse=chkparseC,psi_blast_file=None):
'''
runs psiblast but reads the results from a checkpoint file so that the scoring_matrix values are not rounded down.
'''
if BLASTFILE==None : BLASTFILE=temporary_file_directory+'blast_tmp.txt'
database=psi_blast_database_no_last_extension
if not os.path.isfile(parser_executable) :
if os.path.isfile('chkparse.c') :
os.system(c_compiler+' -O chkparse.c -lm -o '+parser_executable)
elif str_content_of_chkparse!=None :
out=open('chkparse.c','w')
out.write(str_content_of_chkparse)
out.close()
os.system(c_compiler+' -O chkparse.c -lm -o '+parser_executable)
else :
raise IOError('***ERROR*** in psiblast_checkpoint() cannot find chkparse (nor chkparse.c) in the current folder and cannot find %s either (a compiled C program, which is required)\n' % (parser_executable) )
if BLAST_PATH!='' and not os.path.isfile(BLAST_PATH+'makeblastdb') : #check if the BLAST_PATH is correct
raise IOError('***ERROR*** in psiblast_checkpoint() path %s does not lead to the blast directory where makeblastdb should be located' % (BLAST_PATH) )
if psi_blast_file==None :
psi_blast_file='psiblast_'+sequence_name.replace('|','').replace(' ','_').replace('/','').replace(':','_')+'.txt'
pid=str(os.getpid())
seq_file = file(temporary_file_directory+'sequence_tmp'+pid+'.fa','w')
seq_file.write('> '+sequence_name+'\n'+sequence+'\n')
seq_file.close()
#check if the blast database has already been built, if not build it
if (not ( os.path.isfile(database+'.phr') and os.path.isfile(database+'.pin') and os.path.isfile(database+'.psq')) and (not os.path.isfile(database+'.pal')) ) : # .pal is for large datasets
sys.stderr.write('\n******** WARNING ********\n==> the blast database provided (%s) has not yet been compiled by makeblastdb\n Running makeblastdb... (this will take a VERY LONG TIME, but needs to be done only once unless the database is deleted/renamed).\n this may also print some ERROR messages "Error: (1431.1) FASTA-Reader:..." which can safely be ignored\n******** ********\n' % (database))
sys.stderr.flush()
try :
os.system(BLAST_PATH+'makeblastdb -dbtype prot -in '+database)
except :
print '***ERROR*** in psiblast_checkpoint() cannot build blast database %s, maybe you wish to set BLAST_PATH to the correct directory' % (database)
raise
# psipred command: "psiblast -db $dbname -query $tmproot.fasta -inclusion_ethresh 0.001 -out_pssm $tmproot.chk -num_iterations 3 -num_alignments 0 >& $tmproot.blast"
if WEBSERVER_MODE : add=' 2> /dev/null'
else : add=''
os.system(BLAST_PATH+'psiblast -query '+temporary_file_directory+'sequence_tmp'+pid+'.fa'+' -db '+database+' -num_iterations '+str(num_iterations)+' -inclusion_ethresh 0.001 -out_pssm '+temporary_file_directory+'tmproot'+pid+'.chk -num_alignments 0 -num_threads '+str(ncpu)+' > '+BLASTFILE+add)
os.system(parser_executable+' '+temporary_file_directory+'tmproot'+pid+'.chk > '+psi_blast_file)
os.system('rm -f '+temporary_file_directory+'tmproot'+pid+'.chk '+temporary_file_directory+'sequence_tmp'+pid+'.fa')
return psi_blast_file
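# Usage sketch for psiblast_checkpoint; the sequence and the database path below are
# made-up placeholders (the database must have been formatted with makeblastdb, or it
# will be formatted on the fly as described in the warning above).
def _example_psiblast_checkpoint():
    seq = 'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ'
    pssm_file = psiblast_checkpoint(seq, '/databases/uniref90filt.fasta',
                                    sequence_name='demo_seq', num_iterations=3, ncpu=2)
    return pssm_file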
def parse_psiblast_checkpoint_output(psi_blast_filename):
'''
# parse files generated by psi_blast() with psipred like options
# it returns entries,aa_header
# entries=[] # list of dictionary, one per position in the sequence, keys are 'id','aa', and 'scoring_matrix'
# aa_header list of the 20 residues in the order they appear in entry (both in 'occurrence' and in 'scoring_matrix')
'''
full_file=file(psi_blast_filename).read().splitlines()
entries=[] # list of dictionary, one per position in the sequence
aa_header=[]
try :
for i,line in enumerate(full_file) :
#line=line.split()
if line!='' and len(line)>6 :
if line[:6].strip().isdigit() :
line=line.split()
entries+=[ {} ]
entries[-1]['id']=int(line[0])
entries[-1]['aa']=line[1]
entries[-1]['scoring_matrix']=map(float,line[2:])
else :
line=line.split()
if line[0]=='A' : # read the header
aa_header=line
except Exception :
sys.stderr.write("**ERROR** in parse_psiblast_checkpoint_output() while parsing %s at line %d\n" % (psi_blast_filename,i))
raise
return entries,aa_header
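# Sketch of parsing the checkpoint-derived PSSM written by psiblast_checkpoint();
# pssm_file is whatever path that function returned.
def _example_parse_psiblast_checkpoint_output(pssm_file):
    entries, aa_header = parse_psiblast_checkpoint_output(pssm_file)
    first_scores = entries[0]['scoring_matrix']  # one value per amino acid listed in aa_header
    return aa_header, first_scores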
# just returns the psi_blast_file for analysis (it does not parse it!)
# standard database is composed of three files Fragment.fasta.pin Fragment.fasta.psq Fragment.fasta.phr, you should only input Fragment.fasta. If the three files are not
# found the database is generated
def psi_blast(sequence, psi_blast_database_no_last_extension, BLAST_PATH='/usr/local/ncbi/blast/bin/',sequence_name='to_blast',BLASTFILE='blast_tmp.txt',num_iterations=3,ncpu=1,psi_blast_file=None):
database=psi_blast_database_no_last_extension
if BLAST_PATH!='' and not os.path.isfile(BLAST_PATH+'makeblastdb') : #check if the BLAST_PATH is correct
raise IOError('***ERROR*** in psi_blast() path %s does not lead to the blast directory where makeblastdb should be located' % (BLAST_PATH) )
if psi_blast_file==None :
psi_blast_file='psiblast_'+sequence_name.replace('|','').replace(' ','_').replace('/','').replace(':','_')+'.txt'
seq_file = file('sequence_temp.txt','w')
seq_file.write('> '+sequence_name+'\n'+sequence+'\n')
seq_file.close()
#check if the blast database has already been built, if not build it
if (not ( os.path.isfile(database+'.phr') and os.path.isfile(database+'.pin') and os.path.isfile(database+'.psq')) and (not os.path.isfile(database+'.pal')) ) : # .pal is for large datasets
try :
sys.stderr.write('\n==> the blast database provided (%s) has not yet been compiled by makeblastdb\n Running makeblastdb... (this will take a LONG TIME, but needs to be done only once unless the database is deleted/renamed).\n' % (database))
sys.stderr.flush()
os.system(BLAST_PATH+'makeblastdb -dbtype prot -in '+database)
except :
sys.stderr.write( '\n\n***ERROR*** in psi_blast() cannot build blast database %s, maybe you wish to set BLAST_PATH to the correct directory\n' % (database))
raise
if WEBSERVER_MODE : add=' 2> /dev/null'
else : add=''
os.system(BLAST_PATH+'psiblast -query sequence_temp.txt -db '+database+' -num_iterations '+str(num_iterations)+' -out_ascii_pssm '+psi_blast_file+' -out '+BLASTFILE+' -num_threads '+str(ncpu)+add)
os.system('rm -f sequence_temp.txt')
return psi_blast_file
def parse_psiblast_output(psi_blast_filename, percentage_to_fractions=False, calculate_entropy_of_profile=True):
'''
# parse files generated by psi_blast() with -out_ascii_pssm option.
# it returns entries,aa_header,general_results
# entries=[] # list of dictionary, one per position in the sequence, keys are 'id','aa', 'occurrence' and 'scoring_matrix'
# aa_header list of the 20 residues in the order they appear in entry (both in 'occurrence' and in 'scoring_matrix')
# general_results contains general results on the alignment such as 'Standard_UngappedK/L'
# if calculate_entropy_of_profile then general_results also contains 'entropy', the average over positions of exp(-sum_i p_i*log(p_i)), where p_i is the frequency of amino acid i at that position of the alignment (i.e. the effective number of different amino acids observed there)
'''
full_file=file(psi_blast_filename).read().splitlines()
entries=[] # list of dictionary, one per position in the sequence
aa_header=[]
general_results={}
try :
if calculate_entropy_of_profile :
entropy=0.
for i,line in enumerate(full_file) :
#line=line.split()
if line!='' and len(line)>6 :
if line[:6].strip().isdigit() :
if 161<=len(line)<=165 : # quite old psiblast version; 165 should not normally occur, it happens sometimes when the occurrences are all zeros
matrix_st, matrix_end,matrix_every,occ_start,occ_end = 9,69,3,70,150
elif 181<=len(line)<=185 : # recent psiblast version
matrix_st, matrix_end,matrix_every,occ_start,occ_end = 9,89,4,90,170
else :
raise Exception("Line length of %d not recognized" % (len(line)))
entries+=[ {} ]
entries[-1]['id']=int(line[:6].strip())
entries[-1]['aa']=line[6]
entries[-1]['scoring_matrix']=[int(score) for score in split_every(line[matrix_st:matrix_end],matrix_every) ]
if percentage_to_fractions : entries[-1]['occurrence']=[float(score)/100. for score in line[occ_start:occ_end].split() ]
else : entries[-1]['occurrence']=[int(score) for score in line[occ_start:occ_end].split() ]
if calculate_entropy_of_profile :
if percentage_to_fractions : tmp=numpy.array( entries[-1]['occurrence'] )
else : tmp=numpy.array( entries[-1]['occurrence'] )/100.
if all(tmp<=0.000001) :
entropy += 1.
else :
C_i = sum(tmp*numpy.log(tmp+0.00000001))# sum_over_20_aa( frequency * log(frequency))
entropy += numpy.exp( - C_i ) # exp( -sum )
entries[-1]['information']=float(line[occ_end:occ_end+7])
entries[-1]['gapless_match_to_pseudocount']=float(line[occ_end+7:])
else :
line=line.split()
if line[0]=='A' : # read the header
aa_header=line[:20]
elif 'Standard'==line[0] and 'Ungapped'==line[1] :
general_results['Standard_UngappedK/L']=[float(l) for l in line[2:]]
elif 'Standard'==line[0] and 'Gapped'==line[1] :
general_results['Standard_GappedK/L']=[float(l) for l in line[2:]]
elif 'PSI'==line[0] and 'Ungapped'==line[1] :
general_results['PSI_UngappedK/L']=[float(l) for l in line[2:]]
elif 'PSI'==line[0] and 'Gapped'==line[1] :
general_results['PSI_GappedK/L']=[float(l) for l in line[2:]]
if calculate_entropy_of_profile :
entropy/=(1.*len(entries))
general_results['entropy']=entropy
except Exception :
sys.stderr.write("\n**ERROR** in parse_psiblast_output() while parsing %s at line %d\n" % (psi_blast_filename,i))
raise
return entries,aa_header,general_results
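# Sketch of parsing an ascii PSSM written by psi_blast() with -out_ascii_pssm;
# pssm_file is a placeholder path.
def _example_parse_psiblast_output(pssm_file):
    entries, aa_header, general_results = parse_psiblast_output(pssm_file,
                                                                percentage_to_fractions=True)
    # 'entropy' is the per-position average of exp(-sum_i p_i*log(p_i)), see docstring above
    return entries[0]['occurrence'], general_results['entropy']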
def split_every(staff, num):
'''
split a string, a list or a tuple into chunks of num elements
'''
return [ staff[start:start+num] for start in range(0, len(staff), num) ]
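# Quick illustrative checks of split_every:
def _example_split_every():
    assert split_every('ABCDEFG', 3) == ['ABC', 'DEF', 'G']
    assert split_every([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]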
def loose_compare_keyword(keyword, listt , begin_with=False, end_with=False):
'''
see if keyword is contained in any of the elements in listt.
Only the first element satisfying this condition is returned
if begin_with keyword has to be found at the beginning of the element,
if end_with keyword has to be found at the end of the element.
if both are True keyword has to be either at the end or at the beginning of the element
it returns True and the first matching element or False and None
'''
le=len(keyword)
if begin_with and end_with :
for k in listt :
if len(k) >= le :
if keyword==k[:le] or keyword==k[-le:] :
return True,k
return False,None
if begin_with :
for k in listt :
if len(k)>= le :
if keyword==k[:le] :
return True,k
return False,None
if end_with :
for k in listt :
if len(k)>= le :
if keyword==k[-le:] :
return True,k
return False,None
for k in listt :
if keyword in k : return True,k
return False,None
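# Illustrative checks of loose_compare_keyword:
def _example_loose_compare_keyword():
    assert loose_compare_keyword('score', ['raw_score', 'name']) == (True, 'raw_score')
    assert loose_compare_keyword('score', ['raw_score', 'name'], begin_with=True) == (False, None)
    assert loose_compare_keyword('score', ['score_raw', 'name'], begin_with=True) == (True, 'score_raw')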
class s2D_method :
def __init__(self, parameter_class=parameter_filename ,network_files=None, deep_learning_net_file=None,Nto1_netfile=None, postprocess=normalize_numpy,Nto1Postprocess=None,multiply_before_DL=10., use_psiblast=True, net_trained_on_scoring_matrix=True
,psiblast_ncpu=2,uniref90_psi_blast_database=None,use_psiblast_checkpoint=True,temporary_file_directory='/tmp/',keep_pssm_profile_files=True,folder_with_psiblast_files='psiblast_files/' \
,out_tags=['Helix','Beta','Coil','Polyproline-II']):
if parameter_class!=None and network_files==None :
if type(parameter_class) is str :
tmp=parameter_class
parameter_class=s2D.s2D_parameters()
parameter_class.read(tmp)
network_files=parameter_class.networks
deep_learning_net_file=parameter_class.DL_network
temporary_file_directory=parameter_class.temporary_file_directory
folder_with_psiblast_files=parameter_class.psiblast_files_folder
uniref90_psi_blast_database=parameter_class.psiblast_database
use_psiblast_checkpoint=parameter_class.use_psiblast_checkpoint
psiblast_ncpu=parameter_class.psiblast_ncpu
keep_pssm_profile_files=parameter_class.keep_pssm_profile_files
elif network_files==None :
sys.stderr.write("**WARNING** parameter file not found or not declared\n")
self.network_files=network_files
self.Nto1_net_list=None
self.multiply_before_DL=multiply_before_DL
self.max_window_size=0
if self.multiply_before_DL==None : self.multiply_before_DL=1.
if type(self.network_files) is list or type(self.network_files) is tuple :
self.net=[]
self.Nto1_net_list=[]
try :
for j,net in enumerate(self.network_files) :
self.net += [ PyELM.loadModel(net,verbose=not WEBSERVER_MODE) ]
self.net[-1].name=net
if isinstance(self.net[-1],PyELM.WNto1) or isinstance(self.net[-1],PyELM.Nto1) : self.Nto1_net_list+=[j]
elif self.net[-1].win>self.max_window_size : self.max_window_size=self.net[-1].win
if not WEBSERVER_MODE: print 'loaded %d networks' % (len(self.net))
if self.Nto1_net_list!=[] :
remove=sorted(self.Nto1_net_list,reverse=True)
self.Nto1_net_list=[ self.net[j] for j in remove ]
for j in remove : del self.net[j]
if not WEBSERVER_MODE: print ' of which %d Nto1 networks: %s' % (len(self.Nto1_net_list),' '.join([n.name for n in self.Nto1_net_list]))
self.Nto1_net_list.sort(key=lambda x : x.name )
else :
self.Nto1_net_list=None
self.net.sort(key=lambda x : x.name )
if not WEBSERVER_MODE: print 'Sorted:',[n.name for n in self.net]
except Exception :
sys.stderr.write('\n**ERROR** probably when attempting to load network %s,\n maybe the file is not in the correct directory\n\n' % (net))
raise
elif network_files!=None :
self.net=PyELM.loadModel(self.network_files,verbose=not WEBSERVER_MODE)
self.deep_learning_net_file=deep_learning_net_file
if self.deep_learning_net_file!=None :
self.DL=True
self.DL_net=PyELM.loadModel(self.deep_learning_net_file,verbose=not WEBSERVER_MODE)
self.DL_net.name=self.deep_learning_net_file
if 'win' in dir(self.DL_net) and self.DL_net.win>self.max_window_size : self.max_window_size=self.DL_net.win
elif 'window' in dir(self.DL_net) and self.DL_net.window>self.max_window_size : self.max_window_size=self.DL_net.window
else :
self.DL=False
self.Nto1Postprocess=Nto1Postprocess
self.Nto1_net=None
self.Nto1_netfile=Nto1_netfile
self.run_Nto1_first=False # option (never tested) that allows running an Nto1 network before anything else and then feeding its output to the deep-learning stage
if self.Nto1_net==None and self.Nto1_netfile!=None :
self.Nto1_net=PyELM.loadModel(self.Nto1_netfile,verbose=not WEBSERVER_MODE)
self.run_Nto1_first=True
self.postprocess=postprocess
self.out_tags=out_tags
self.psiblast_ncpu=psiblast_ncpu
self.uniref90_psi_blast_database=uniref90_psi_blast_database
self.psi_blast_path=''
self.use_psiblast_checkpoint=use_psiblast_checkpoint
self.keep_pssm_profile_files=keep_pssm_profile_files
if not os.path.isdir(temporary_file_directory) :
sys.stderr.write("**Warning declared temporary_file_directory %s does not seem to exist, using the current directory.\n" % (str(temporary_file_directory)))
temporary_file_directory=''
elif temporary_file_directory[-1]!='/' : temporary_file_directory+='/'
self.temporary_file_directory=temporary_file_directory
self.folder_with_psiblast_files=folder_with_psiblast_files
self.function_residue_to_input=residue_to_input
self.use_psiblast=use_psiblast
self.seq_name=None
self.net_trained_on_scoring_matrix=net_trained_on_scoring_matrix
def run_on_psiblast_file(self, psiblast_file,input_sequence=None):
inp,sequence=self.input_from_psiblast_file(psiblast_file,input_sequence=input_sequence, delete_pssm_file=False)
if inp is None : return None,None,None
if self.run_Nto1_first :
Nto1_out,_=self.run_Nto1(sequence, read_staff_from_psiblastfile=None, seq_name=self.seq_name)
numpy.vstack((Nto1_out,inp))
if not self.DL : # just run the networks if no Deep learning is requested
if type(self.net) is list :
output=average_multinetwork_prediction(self.net, inp, postprocess=self.postprocess)
else :
output=run_net(self.net, inp, postprocess=self.postprocess)
else :
# run networks
self.net_output=run_net(self.net[0], inp, postprocess=None) # do not postprocess before DL!
for ne in self.net[1:] :
self.net_output= numpy.hstack((self.net_output, run_net( ne, inp, postprocess=None) ) ) # do not postprocess before DL!
self.dl_input=numpy.hstack((inp, self.multiply_before_DL* self.net_output))
# run N-to-1 network
if self.Nto1_net_list!=None :
self.n1_out=run_net(self.Nto1_net_list[0],[self.dl_input[:,:self.Nto1_net_list[0].dimInput]], postprocess=self.Nto1Postprocess)[0] # run the Nto1, This won't have one vertical output per amino acid, but only one for the whole sequence
for Nto1 in self.Nto1_net_list[1:] :
self.n1_out= numpy.hstack( (self.n1_out, run_net(Nto1,[self.dl_input[:,:Nto1.dimInput]], postprocess=self.Nto1Postprocess)[0] )) # run the Nto1
self.dl_input=numpy.hstack((self.dl_input, self.multiply_before_DL*numpy.array(self.n1_out)*numpy.ones( (inp.shape[0],1) ) ))
# run DL network
output=run_net(self.DL_net, self.dl_input, postprocess=self.postprocess)
return inp,sequence, output
def input_from_psiblast_file(self, psiblast_file,input_sequence=None,delete_pssm_file=False):
if self.use_psiblast_checkpoint :
entries,_ = parse_psiblast_checkpoint_output(psiblast_file )
#print 'entries',len(entries),len(entries[0]['scoring_matrix'])
else :
entries,_,_ = parse_psiblast_output(psiblast_file , percentage_to_fractions=True,calculate_entropy_of_profile=False)
add_aa_specific_neurone=False # determine whether we should add a specific neurone or not
if entries==[] :
sys.stderr.write('\n***WARNING*** blast file %s is probably empty.. reblasting...\n' % (psiblast_file.split('/')[-1]))
sys.stderr.flush()
return None,None
elif len(entries[0]['scoring_matrix'])==self.net[0].numID-1:
add_aa_specific_neurone=True
sequence=''
if self.net_trained_on_scoring_matrix or self.use_psiblast_checkpoint :
inp=numpy.array(entries[0]['scoring_matrix'])
sequence+=entries[0]['aa']
for en in entries[1:] :
inp=numpy.vstack((inp, numpy.array(en['scoring_matrix'])))
sequence+=en['aa']
#print 'inp',inp.shape
else :
inp=numpy.array(entries[0]['occurrence'])
sequence+=entries[0]['aa']
for en in entries[1:] :
inp=numpy.vstack((inp, numpy.array(en['occurrence'])))
sequence+=en['aa']
if input_sequence!=None and input_sequence!=sequence :
sys.stderr.write('*WARNING* sequence read from blast file %s is different from the one given as input.. reblasting...' % (psiblast_file))
sys.stderr.flush()
if delete_pssm_file :os.system('rm -f %s' % (psiblast_file))
return None,None
if add_aa_specific_neurone :
sp=[]
for s in sequence :
sp.append(residue_to_input(s, numb_neurones=1,out_in_range=(-10.,10.), use_rank=res_closness_blosum62,nres=20))
#print len(sp),inp.shape
inp=numpy.hstack( (numpy.array(sp,ndmin=2).T,inp))
if delete_pssm_file :os.system('rm -f %s' % (psiblast_file))
return inp,sequence
def psiblast(self,sequence):
'''
run psiblast on the sequence using all the default parameters. It returns the path to the output psiblast file.
'''
psiblast_file,_=self.psiblast_sequence(sequence, sequence_name=self.seq_name, uniref90_psi_blast_database=self.uniref90_psi_blast_database, psi_blast_path=self.psi_blast_path, folder_with_psiblast_files=self.folder_with_psiblast_files, keep_psiblast_alignments=False)
return psiblast_file
def psiblast_sequence(self,sequence,sequence_name='myseq', uniref90_psi_blast_database=None, psi_blast_path='', folder_with_psiblast_files='psiblast_files/', keep_psiblast_alignments=False):
'''
used internally; as a user call psiblast() instead.
This runs psiblast on the sequence unless the corresponding psiblast output file is already found in the folder folder_with_psiblast_files.
returns psiblast_file,gzip : the path to the output file (including the file name) and gzip=True if the file was found gzipped (the actual file has however been gunzipped by the time the function returns).
psi_blast_path=='' means psiblast is on the system path and can be called from anywhere (otherwise make sure the given path ends with '/').
'''
if type(sequence) is not str : # we assume is a Seq object
sequence_name=sequence.id
sequence=str(sequence.seq)
if uniref90_psi_blast_database==None or not os.path.isfile(uniref90_psi_blast_database) :
sys.stderr.write("\n**ERROR** psiblast database not found or not specified in run_on_sequence()!!\n (default should be uniref90 filtered from low-complexity regions) declared %s \n" % (str(uniref90_psi_blast_database)) )
sys.stderr.flush()
return 1
if folder_with_psiblast_files[-1]!='/' : folder_with_psiblast_files+='/'
if not os.path.isdir(folder_with_psiblast_files) :
os.system('mkdir '+folder_with_psiblast_files)
psi_blast_file=sequence_name.replace('|','').replace(' ','_').replace('/','').replace(':','_')+'_psi_blast.txt'
gzip=False
blast_anyway=True
#print folder_with_psiblast_files+psi_blast_file
# check if the psiblast file for this sequence name already exists (it is kind of useless to have it saved according to the sequence name)
if os.path.isfile(folder_with_psiblast_files+psi_blast_file+'.gz') :
psiblast_file=folder_with_psiblast_files+psi_blast_file
gzip=True
os.system('gunzip '+folder_with_psiblast_files+psi_blast_file+'.gz')
psiblast_file=folder_with_psiblast_files+psi_blast_file
_, sequence2 = self.input_from_psiblast_file(psiblast_file, input_sequence=None, delete_pssm_file=False)
if sequence==sequence2 : blast_anyway=False
elif os.path.isfile(folder_with_psiblast_files+psi_blast_file) :
psiblast_file=folder_with_psiblast_files+psi_blast_file
_, sequence2 = self.input_from_psiblast_file(psiblast_file, input_sequence=None, delete_pssm_file=False)
if sequence==sequence2 : blast_anyway=False
if blast_anyway :
psiblast_alignments=sequence_name.replace('|','').replace(' ','_').replace('/','').replace(':','_')+'_blast.txt'
if self.use_psiblast_checkpoint : psiblast_file= psiblast_checkpoint(sequence, uniref90_psi_blast_database, BLAST_PATH=psi_blast_path, sequence_name=sequence_name, BLASTFILE=psiblast_alignments,temporary_file_directory=self.temporary_file_directory, num_iterations=3,ncpu=self.psiblast_ncpu, psi_blast_file=folder_with_psiblast_files+psi_blast_file)
else : psiblast_file=psi_blast(sequence, uniref90_psi_blast_database, BLAST_PATH=psi_blast_path, sequence_name=sequence_name, BLASTFILE=psiblast_alignments, num_iterations=3,ncpu=self.psiblast_ncpu, psi_blast_file=folder_with_psiblast_files+psi_blast_file)
if not keep_psiblast_alignments :
os.system('rm -f %s' % (psiblast_alignments)) # remove blast file with the alignment, we need only the psiblast one
return psiblast_file,gzip
def run_on_sequence(self,sequence, uniref90_psi_blast_database=None,sequence_name='myseq', psi_blast_path='', folder_with_psiblast_files='psiblast_files/', keep_psiblast_alignments=False):
'''
psi_blast_path=='' means psiblast is added to the system path and can be called from anywhere. (make sure the path ends with '/' if psiblast is not installed globally)
'''
if type(sequence) is not str : # we assume is a Seq object
sequence_name=sequence.id
sequence=str(sequence.seq)
if self.use_psiblast :
psiblast_file,gzip= self.psiblast_sequence(sequence,sequence_name=sequence_name, uniref90_psi_blast_database=uniref90_psi_blast_database, psi_blast_path=psi_blast_path, folder_with_psiblast_files=folder_with_psiblast_files, keep_psiblast_alignments=keep_psiblast_alignments)
inp,sequence2, output = self.run_on_psiblast_file(psiblast_file,input_sequence=sequence)
if gzip :
os.system('gzip '+psiblast_file)
if sequence!=sequence2 :
if inp!=None : sys.stderr.write("**ERROR** after psiblast. Returned sequence is different from original one. Maybe in folder %s there is already a file named %s. Remove it or give a different sequence !!\n\n" % (folder_with_psiblast_files,psiblast_file))
else : sys.stderr.write("psiblast failed on sequence:\n%s\n" % (sequence))
return None , None
if not self.keep_pssm_profile_files : os.system('rm -f %s*' % (psiblast_file))
return inp, output
def run_Nto1(self,sequence,read_staff_from_psiblastfile=None,seq_name='seq_for_s2D'):
'''
it runs an Nto1 network
'''
if type(sequence) is str :
self.sequence=sequence
if self.seq_name==None : self.seq_name=seq_name
else :
self.sequence=str(sequence.seq)
self.seq_name=str(sequence.id)
if read_staff_from_psiblastfile!=None :
if read_staff_from_psiblastfile==True :
read_staff_from_psiblastfile=self.psiblast(sequence)
inp,_=self.input_from_psiblast_file(read_staff_from_psiblastfile, input_sequence=sequence, delete_pssm_file=False)
else :
inp=[]
for s in sequence :
inp.append(residue_to_input(s, numb_neurones=20, use_rank=res_closness_blosum62,nres=20))
if self.Nto1_net==None and self.Nto1_netfile!=None :
self.Nto1_net=PyELM.loadModel(self.Nto1_netfile,verbose=not WEBSERVER_MODE)
Pred=run_net(self.Nto1_net, [inp], postprocess=self.postprocess)
return Pred, inp
def run(self,sequence, seq_name='seq_for_s2D',keep_psiblast_alignments=False):
if type(sequence) is str :
self.sequence=sequence
self.seq_name=seq_name
else :
self.sequence=str(sequence.seq)
sequence.id=sequence.id #.replace('|','').replace(' ','_').replace('/','').replace(':','_')
self.seq_name=str(sequence.id)
#if self.use_psiblast : print 'Using psiblastDB %s' % (str(self.uniref90_psi_blast_database))
###
#print self.sequence,self.seq_name
self.input, self.output= self.run_on_sequence(self.sequence,sequence_name=self.seq_name,uniref90_psi_blast_database=self.uniref90_psi_blast_database, folder_with_psiblast_files=self.folder_with_psiblast_files,keep_psiblast_alignments=keep_psiblast_alignments)
def get_ss_kind(self,out_per_res):
f=out_per_res.argmax()
if f==0 : return 'H'
if f==1 : return 'E'
if f==2 : return 'C'
if f==3 : return 'P'
def print_results(self, out=sys.stdout):
close_file=False
if type(out) is str :
close_file=True
print '\nSaving output to file %s...' % (out)
out=open(out,'w')
if len(self.output)!=len(self.sequence) :
sys.stderr.write("ERROR in print_results() len output!= len sequence %d != %d\n\n" % (len(self.output),len(self.sequence)))
self.ss_string=''
self.ss_profiles=[]
n_out=len(self.output[0])
out.write('> %s\n' % (self.seq_name))
tmp='#rid\tresname'
for i in xrange(n_out) :
self.ss_profiles.append([])
tmp+='\t%s' % (self.out_tags[i])
tmp+='\tss_kind'
out.write(tmp+'\n')
for j,r in enumerate(self.sequence) :
tmp=''
for i,float_n in enumerate(self.output[j]) :
tmp+='\t%lf' % (float_n)
self.ss_profiles[i]+=[ float_n ]
k=self.get_ss_kind(self.output[j])
out.write('%d\t%s%s\t%s\n' % (j+1,r, tmp ,k))
self.ss_string+=k
if close_file :
out.close()
def plot_results(self,save=True,show=False,dpi=250, plotCoil=False,savefolder='',**kwargs):
if not can_plot :
raise Exception("When first importing the module there was a problem importing matplotlib (maybe you don't have version >= 1.4?)")
if type(save) is bool and save==True :
save =self.seq_name.replace('|','_').replace(' ','_').replace('/','_').replace(':','_')+'_s2D_plot.png'
if savefolder!='' and savefolder!=None :
if savefolder[-1]!='/' : savefolder+='/'
save=savefolder+save
plot_s2D_results(self.output,self.sequence,self.ss_string,seq_names=self.seq_name,bar=True,dont_plot_coil=not plotCoil,y_range=(0,1),dpi=dpi,save=save,show=show,**kwargs)
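# End-to-end sketch of the s2D_method class above; the sequence and the output file
# name are placeholders, and the default parameter file (parameter_filename) is assumed
# to point at valid network files and a formatted psiblast database.
def _example_s2D_method():
    predictor = s2D_method()                      # loads the networks listed in the parameter file
    predictor.run('MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ', seq_name='demo_seq')
    predictor.print_results('demo_seq_s2D.out')   # one line per residue plus the ss kind
    return predictor.ss_string                    # e.g. a string of H/E/C/P characters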
|
mit
|
michigraber/scikit-learn
|
examples/svm/plot_separating_hyperplane_unbalanced.py
|
329
|
1850
|
"""
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane obtained with automatic correction
for the unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
KasperPRasmussen/bokeh
|
bokeh/charts/__init__.py
|
3
|
1426
|
from __future__ import absolute_import
from ..util.dependencies import import_required
import_required(
'pandas',
'The bokeh.charts interface requires Pandas (http://pandas.pydata.org) to be installed.'
)
# defaults and constants
from ..plotting.helpers import DEFAULT_PALETTE
# main components
from .chart import Chart, defaults
# operations and attributes for users to input into Charts
from .attributes import color, marker, cat
from .operations import stack, blend
from .stats import bins
# builders
from .builders.line_builder import Line
from .builders.histogram_builder import Histogram
from .builders.bar_builder import Bar
from .builders.scatter_builder import Scatter
from .builders.boxplot_builder import BoxPlot
from .builders.step_builder import Step
from .builders.timeseries_builder import TimeSeries
from .builders.dot_builder import Dot
from .builders.area_builder import Area
from .builders.horizon_builder import Horizon
from .builders.heatmap_builder import HeatMap
from .builders.donut_builder import Donut
# easy access to required bokeh components
from ..models import ColumnDataSource
from ..io import (
curdoc, output_file, output_notebook, output_server, push,
reset_output, save, show, gridplot, vplot, hplot)
# Silence pyflakes
(curdoc, output_file, output_notebook, output_server, push,
reset_output, save, show, gridplot, vplot, hplot, ColumnDataSource,
DEFAULT_PALETTE)
|
bsd-3-clause
|
MKLab-ITI/news-popularity-prediction
|
news_popularity_prediction/learning/cascade_lifetime.py
|
1
|
24497
|
__author__ = 'Georgios Rizos ([email protected])'
import os
import statistics
import numpy as np
import pandas as pd
from news_popularity_prediction.datautil.feature_rw import h5load_from, h5store_at, h5_open, h5_close, get_target_value,\
get_kth_row
from news_popularity_prediction.discussion.features import get_branching_feature_names, get_usergraph_feature_names,\
get_temporal_feature_names
from news_popularity_prediction.learning import concatenate_features
def make_feature_matrices(features_folder,
osn_focus):
# Read comparison lifetimes.
k_list_file_path = features_folder + "/k_list/focus_" + "post" + ".txt"
k_list = load_valid_k_list(k_list_file_path)
# Get feature names.
branching_feature_dict = dict()
usergraph_feature_dict = dict()
temporal_feature_dict = dict()
branching_feature_dict[osn_focus] = get_branching_feature_names(osn_name=osn_focus)
usergraph_feature_dict[osn_focus] = get_usergraph_feature_names(osn_name=osn_focus)
temporal_feature_dict[osn_focus] = get_temporal_feature_names(osn_name=osn_focus)
branching_feature_names_list_dict = dict()
usergraph_feature_names_list_dict = dict()
temporal_feature_names_list_dict = dict()
branching_feature_names_list_dict[osn_focus] = sorted(branching_feature_dict[osn_focus])
usergraph_feature_names_list_dict[osn_focus] = sorted(usergraph_feature_dict[osn_focus])
temporal_feature_names_list_dict[osn_focus] = sorted(temporal_feature_dict[osn_focus])
number_of_branching_features_dict = dict()
number_of_usergraph_features_dict = dict()
number_of_temporal_features_dict = dict()
number_of_branching_features_dict[osn_focus] = len(branching_feature_names_list_dict[osn_focus])
number_of_usergraph_features_dict[osn_focus] = len(usergraph_feature_names_list_dict[osn_focus])
number_of_temporal_features_dict[osn_focus] = len(temporal_feature_names_list_dict[osn_focus])
# Make dataset matrix at time t_{\infty}.
dataset_full_path = features_folder + "/dataset_full/dataset_full.h5"
h5_stores_and_keys = get_h5_stores_and_keys(features_folder,
"post")
dataset_size = get_dataset_size(h5_stores_and_keys,
"post")
dataset_full,\
index = form_dataset_full(dataset_size,
h5_stores_and_keys,
osn_focus,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict,
number_of_branching_features_dict,
number_of_usergraph_features_dict,
number_of_temporal_features_dict)
store_dataset_full(dataset_full_path,
dataset_full,
index,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict)
X_k_min_dict = dict()
X_t_next_dict = dict()
X_k_min_dict[osn_focus] = np.zeros(dataset_size, dtype=int)
X_t_next_dict[osn_focus] = np.zeros(dataset_size, dtype=float)
for k_index, k in enumerate(k_list):
dataset_k,\
X_k_min_dict,\
X_t_next_dict,\
index = form_dataset_k(dataset_size,
h5_stores_and_keys,
float(k),
X_k_min_dict,
X_t_next_dict,
feature_osn_name_list=[osn_focus])
try:
dataset_k_path = features_folder + "/dataset_k/" + osn_focus + "_lifetime_" + k + "_dataset_k.h5"
except TypeError:
dataset_k_path = features_folder + "/dataset_k/" + osn_focus + "_lifetime_" + repr(k) + "_dataset_k.h5"
store_dataset_k(dataset_k_path,
dataset_k,
X_k_min_dict,
X_t_next_dict,
index)
def form_dataset_full(dataset_size,
h5_stores_and_keys,
osn_focus,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict,
number_of_branching_features_dict,
number_of_usergraph_features_dict,
number_of_temporal_features_dict):
osn_to_targetlist = dict()
if osn_focus == "reddit":
osn_to_targetlist["reddit"] = ["comments",
"users",
"score_wilson",
"controversiality_wilson"]
if osn_focus == "slashdot":
osn_to_targetlist["slashdot"] = ["comments",
"users"]
# Initialize full feature arrays.
dataset_full = dict()
index = dict()
for osn_name in osn_to_targetlist.keys():
dataset_full[osn_name] = dict()
index[osn_name] = list()
X_branching_full = np.empty((dataset_size,
number_of_branching_features_dict[osn_name]),
dtype=np.float64)
dataset_full[osn_name]["X_branching"] = X_branching_full
X_usergraph_full = np.empty((dataset_size,
number_of_usergraph_features_dict[osn_name]),
dtype=np.float64)
dataset_full[osn_name]["X_usergraph"] = X_usergraph_full
X_temporal_full = np.empty((dataset_size,
number_of_temporal_features_dict[osn_name]),
dtype=np.float64)
dataset_full[osn_name]["X_temporal"] = X_temporal_full
dataset_full[osn_name]["y_raw"] = dict()
for target_name in osn_to_targetlist[osn_name]:
dataset_full[osn_name]["y_raw"][target_name] = np.empty(dataset_size, dtype=np.float64)
# Fill full feature arrays.
offset = 0
for h5_store_files, h5_keys in h5_stores_and_keys:
        index[osn_name].extend(h5_keys["post"])
fill_X_handcrafted_full_and_y_raw(dataset_full,
h5_store_files,
h5_keys["post"],
offset,
osn_name,
osn_to_targetlist[osn_name],
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict,
number_of_branching_features_dict,
number_of_usergraph_features_dict,
number_of_temporal_features_dict)
offset += len(h5_keys["post"])
return dataset_full, index
def fill_X_handcrafted_full_and_y_raw(dataset_full,
h5_store_files,
h5_keys,
offset,
osn_name,
target_list,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict,
number_of_branching_features_dict,
number_of_usergraph_features_dict,
number_of_temporal_features_dict):
for d, h5_key in enumerate(h5_keys):
handcrafted_features_data_frame = h5load_from(h5_store_files[1], h5_key)
kth_row = get_kth_row(handcrafted_features_data_frame,
-1,
branching_feature_names_list_dict[osn_name])
dataset_full[osn_name]["X_branching"][offset + d, :number_of_branching_features_dict[osn_name]] = kth_row
kth_row = get_kth_row(handcrafted_features_data_frame,
-1,
usergraph_feature_names_list_dict[osn_name])
dataset_full[osn_name]["X_usergraph"][offset + d, :number_of_usergraph_features_dict[osn_name]] = kth_row
kth_row = get_kth_row(handcrafted_features_data_frame,
-1,
temporal_feature_names_list_dict[osn_name])
dataset_full[osn_name]["X_temporal"][offset + d, :number_of_temporal_features_dict[osn_name]] = kth_row
for target_name in target_list:
dataset_full[osn_name]["y_raw"][target_name][offset + d] = get_target_value(handcrafted_features_data_frame,
target_name)
def load_dataset_full(dataset_full_path,
target_osn_name,
feature_osn_name_list,
target_name_list,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict):
dataset_full = dict()
dataset_full[target_osn_name] = dict()
index = dict()
h5_store = h5_open(dataset_full_path)
for osn_name in feature_osn_name_list:
df = h5load_from(h5_store, "/data/" + osn_name + "/X_branching")[branching_feature_names_list_dict[osn_name]]
index[osn_name] = list(df.index)
dataset_full[osn_name]["X_branching"] = df.values
dataset_full[osn_name]["X_usergraph"] = h5load_from(h5_store, "/data/" + osn_name + "/X_usergraph")[usergraph_feature_names_list_dict[osn_name]].values
dataset_full[osn_name]["X_temporal"] = h5load_from(h5_store, "/data/" + osn_name + "/X_temporal")[temporal_feature_names_list_dict[osn_name]].values
data_frame = h5load_from(h5_store, "/data/" + target_osn_name + "/y_raw")
dataset_full[target_osn_name]["y_raw"] = dict()
for target_name in target_name_list:
dataset_full[target_osn_name]["y_raw"][target_name] = data_frame[target_name].values
h5_close(h5_store)
return dataset_full, index
def store_dataset_full(dataset_full_path,
dataset_full,
index,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict):
h5_store = h5_open(dataset_full_path)
for osn_name in dataset_full.keys():
h5store_at(h5_store,
"/data/" + osn_name + "/X_branching",
pd.DataFrame(dataset_full[osn_name]["X_branching"],
columns=branching_feature_names_list_dict[osn_name]))
h5store_at(h5_store,
"/data/" + osn_name + "/X_usergraph",
pd.DataFrame(dataset_full[osn_name]["X_usergraph"],
columns=usergraph_feature_names_list_dict[osn_name]))
h5store_at(h5_store,
"/data/" + osn_name + "/X_temporal",
pd.DataFrame(dataset_full[osn_name]["X_temporal"],
columns=temporal_feature_names_list_dict[osn_name]))
y_raw_dict = dict()
for target_name in dataset_full[osn_name]["y_raw"].keys():
y_raw_dict[target_name] = dataset_full[osn_name]["y_raw"][target_name]
h5store_at(h5_store,
"/data/" + osn_name + "/y_raw",
pd.DataFrame(y_raw_dict))
h5_close(h5_store)
def load_dataset_k(dataset_k_path,
feature_osn_name_list,
branching_feature_names_list_dict,
usergraph_feature_names_list_dict,
temporal_feature_names_list_dict):
dataset_k = dict()
X_k_min_dict = dict()
X_t_next_dict = dict()
index = dict()
h5_store = h5_open(dataset_k_path)
for osn_name in feature_osn_name_list:
dataset_k[osn_name] = dict()
df = h5load_from(h5_store, "/data/" + osn_name + "/X_branching")[branching_feature_names_list_dict[osn_name]]
index[osn_name] = list(df.index)
dataset_k[osn_name]["X_branching"] = df.values
dataset_k[osn_name]["X_usergraph"] = h5load_from(h5_store, "/data/" + osn_name + "/X_usergraph")[usergraph_feature_names_list_dict[osn_name]].values
dataset_k[osn_name]["X_temporal"] = h5load_from(h5_store, "/data/" + osn_name + "/X_temporal")[temporal_feature_names_list_dict[osn_name]].values
data_frame = h5load_from(h5_store, "/data/" + osn_name + "/utility_arrays")
X_k_min_dict[osn_name] = data_frame["X_k_min_array"].values
X_t_next_dict[osn_name] = data_frame["X_t_next_array"].values
h5_close(h5_store)
return dataset_k, X_k_min_dict, X_t_next_dict, index
def store_dataset_k(dataset_k_path,
dataset_k,
X_k_min_dict,
X_t_next_dict,
index):
h5_store = h5_open(dataset_k_path)
for osn_name in dataset_k.keys():
h5store_at(h5_store,
"/data/" + osn_name + "/X_branching",
pd.DataFrame(dataset_k[osn_name]["X_branching"],
columns=sorted(list(get_branching_feature_names(osn_name)))))
h5store_at(h5_store,
"/data/" + osn_name + "/X_usergraph",
pd.DataFrame(dataset_k[osn_name]["X_usergraph"],
columns=sorted(list(get_usergraph_feature_names(osn_name)))))
h5store_at(h5_store,
"/data/" + osn_name + "/X_temporal",
pd.DataFrame(dataset_k[osn_name]["X_temporal"],
columns=sorted(list(get_temporal_feature_names(osn_name)))))
utility_arrays = dict()
utility_arrays["X_k_min_array"] = X_k_min_dict[osn_name]
utility_arrays["X_t_next_array"] = X_t_next_dict[osn_name]
h5store_at(h5_store,
"/data/" + osn_name + "/utility_arrays",
pd.DataFrame(utility_arrays))
h5_close(h5_store)
def form_dataset_k(dataset_size,
h5_stores_and_keys,
k,
X_k_min_dict,
X_t_next_dict,
feature_osn_name_list):
all_feature_osn_names = feature_osn_name_list
dataset_k = dict()
index = dict()
for osn_index, osn_name in enumerate(all_feature_osn_names):
dataset_k[osn_name] = dict()
index[osn_name] = list()
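        # NOTE: the second dimensions below (10 branching, 7 user-graph and
        # 4 temporal features) are hard-coded; presumably they mirror the
        # lengths of the sorted get_*_feature_names() lists used to label
        # the columns when the matrices are stored.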
X_branching_k = np.empty((dataset_size,
10),
dtype=np.float64)
dataset_k[osn_name]["X_branching"] = X_branching_k
X_usergraph_k = np.empty((dataset_size,
7),
dtype=np.float64)
dataset_k[osn_name]["X_usergraph"] = X_usergraph_k
X_temporal_k = np.empty((dataset_size,
4),
dtype=np.float64)
dataset_k[osn_name]["X_temporal"] = X_temporal_k
# Fill full feature arrays.
offset = 0
for h5_store_files, h5_keys in h5_stores_and_keys:
        index[osn_name].extend(h5_keys["post"])
calculate_k_based_on_lifetime(dataset_k, h5_store_files, h5_keys, offset, k, X_k_min_dict, X_t_next_dict, osn_name)
fill_X_handcrafted_k(dataset_k, h5_store_files, h5_keys["post"], offset, k, X_k_min_dict, X_t_next_dict, osn_name)
offset += len(h5_keys["post"])
return dataset_k, X_k_min_dict, X_t_next_dict, index
def calculate_k_based_on_lifetime(dataset_k,
h5_store_files,
h5_keys,
offset,
k,
X_k_min_dict,
X_t_next_dict,
osn_name):
number_of_keys = len(h5_keys["post"])
for d in range(number_of_keys):
timestamps_data_frame = h5load_from(h5_store_files[0], h5_keys["post"][d])
if np.isnan(X_t_next_dict[osn_name][offset + d]):
continue
observed_comments,\
next_lifetime = get_k_based_on_lifetime(timestamps_data_frame,
k,
min_k=X_k_min_dict[osn_name][offset + d],
max_k=-1)
X_k_min_dict[osn_name][offset + d] = observed_comments
X_t_next_dict[osn_name][offset + d] = next_lifetime
def fill_X_handcrafted_k(dataset_k,
h5_store_files,
h5_keys,
offset,
k,
X_k_min_dict,
X_t_next_dict,
osn_name):
concatenate_features.fill_X_handcrafted_k_actual(dataset_k,
h5_store_files,
h5_keys,
offset,
k,
X_k_min_dict,
X_t_next_dict,
sorted(list(get_branching_feature_names(osn_name))),
sorted(list(get_usergraph_feature_names(osn_name))),
sorted(list(get_temporal_feature_names(osn_name))),
osn_name)
def calculate_comparison_lifetimes(features_folder,
osn_focus):
if osn_focus is None:
osn_focus = "post"
h5_stores_and_keys = get_h5_stores_and_keys(features_folder,
osn_focus)
t_list = make_valid_k_list(h5_stores_and_keys,
osn_focus)
k_list_path = features_folder + "/k_list/focus_" + osn_focus + ".txt"
store_valid_k_list(k_list_path,
t_list)
def get_h5_stores_and_keys(features_folder,
osn_focus):
# This is a list of all the .h5 files as produced after preprocessing.
h5_store_file_name_list = os.listdir(features_folder)
h5_store_file_name_list = [h5_store_file_name for h5_store_file_name in sorted(h5_store_file_name_list) if not h5_store_file_name[-1] == "~"]
timestamp_h5_store_file_name_list = [name for name in h5_store_file_name_list if "timestamp" in name]
handcrafted_features_h5_store_file_name_list = [name for name in h5_store_file_name_list if "handcrafted" in name]
timestamp_h5_store_file_path_list = [features_folder + "/" + h5_store_file_name for h5_store_file_name in timestamp_h5_store_file_name_list]
handcrafted_features_h5_store_file_path_list = [features_folder + "/" + h5_store_file_name for h5_store_file_name in handcrafted_features_h5_store_file_name_list]
file_path_list_zip = zip(timestamp_h5_store_file_path_list,
handcrafted_features_h5_store_file_path_list)
h5_stores_and_keys = list()
for file_paths in file_path_list_zip:
timestamp_h5_store_file = h5_open(file_paths[0])
handcrafted_features_h5_store_file = h5_open(file_paths[1])
keys_dict = dict()
keys_dict[osn_focus] = sorted((key for key in timestamp_h5_store_file.keys() if osn_focus in key))
h5_stores_and_keys.append(((timestamp_h5_store_file,
handcrafted_features_h5_store_file),
keys_dict))
return h5_stores_and_keys
def make_valid_k_list(h5_stores_and_keys,
osn_focus):
comment_lifetime_list = get_all_post_lifetimes(h5_stores_and_keys,
osn_focus)
comment_lifetime_mean = statistics.mean(comment_lifetime_list)
# Fit a nonnegative function.
t_list = np.linspace(0, comment_lifetime_mean, 100)
t_list = t_list[0:15]
t_list = list(t_list)
return t_list
def store_valid_k_list(k_list_path, k_list):
with open(k_list_path, "w") as fp:
for k in k_list:
row = repr(k) + "\n"
fp.write(row)
def load_valid_k_list(k_list_path):
k_list = list()
with open(k_list_path, "r") as fp:
for row in fp:
row_stripped = row.strip()
if row_stripped == "":
continue
k_list.append(row_stripped)
return k_list
def get_dataset_size(h5_store_and_keys,
osn_focus):
dataset_size = 0
for h5_store_file, h5_key_list in h5_store_and_keys:
dataset_size += len(h5_key_list[osn_focus])
return dataset_size
def get_all_post_lifetimes(h5_stores_and_keys, osn_focus):
all_post_lifetimes_list = list()
append_post_lifetime = all_post_lifetimes_list.append
for h5_store_files, h5_keys in h5_stores_and_keys:
for h5_key in h5_keys[osn_focus]:
timestamps_data_frame = h5load_from(h5_store_files[0], h5_key)
timestamps_col = timestamps_data_frame["timestamp"]
if timestamps_col.size == 1:
index = 0
else:
index = int(np.ceil(0.99 * (timestamps_col.size - 1)))
append_post_lifetime(timestamps_col.iloc[index] - timestamps_col.iloc[0])
return all_post_lifetimes_list
def get_all_comment_lifetimes(h5_stores_and_keys, osn_focus):
all_comment_timestamps_list = list()
extend_comment_timestamp = all_comment_timestamps_list.extend
for h5_store_files, h5_keys in h5_stores_and_keys:
for h5_key in h5_keys[osn_focus]:
timestamps_data_frame = h5load_from(h5_store_files[0], h5_key)
timestamps_col = timestamps_data_frame["timestamp"]
extend_comment_timestamp(timestamps_col.iloc[1:] - timestamps_col.iloc[0])
return all_comment_timestamps_list
def get_dataframe_row(data_frame, k, k_based_on_lifetime_old, feature_list):
lifetime = k
k_based_on_lifetime = get_k_based_on_lifetime(data_frame, lifetime, min_k=k_based_on_lifetime_old, max_k=-1)
kth_row = get_kth_row(data_frame, k_based_on_lifetime, feature_list)
return kth_row, k_based_on_lifetime
def get_k_based_on_timestamp(data_frame, timestamp, min_k=0, max_k=-1):
    timestamp_col = data_frame["timestamp"]
    # Restrict the search to the [min_k, max_k] window; max_k == -1 means "to the end".
    if max_k == -1:
        timestamp_col = timestamp_col.iloc[min_k:]
    else:
        timestamp_col = timestamp_col.iloc[min_k:max_k + 1]
    # Position of the first comment whose timestamp reaches the target timestamp,
    # offset back to an absolute index (same convention as get_k_based_on_lifetime).
    hits = np.flatnonzero(np.array(timestamp_col >= timestamp))
    if hits.shape[0] == 0:
        k = -1
    else:
        k = min_k + hits[0]
return k
def get_k_based_on_lifetime(data_frame, lifetime, min_k, max_k):
lifetime_col = data_frame["timestamp"] - data_frame["timestamp"].iloc[0]
lifetime_col = lifetime_col.iloc[min_k:]
index = np.searchsorted(lifetime_col, lifetime)
index = max(0, index[0]-1)
k = min_k + index
if lifetime_col.size > (index+1):
next_t = lifetime_col.iloc[index+1]
if k == min_k:
if lifetime_col.iloc[index] == lifetime_col.iloc[index+1]:
k += 1
if lifetime_col.size > (index+2):
next_t = lifetime_col.iloc[index+2]
else:
next_t = np.nan
else:
next_t = np.nan
return k, next_t
def get_cascade_lifetime(data_frame):
timestamp_col = data_frame["timestamp"]
cascade_source_timestamp = timestamp_col.iloc[0]
last_comment_timestamp = timestamp_col.iloc[-1]
cascade_lifetime = last_comment_timestamp - cascade_source_timestamp
return cascade_lifetime
|
apache-2.0
|
akrherz/idep
|
scripts/import/flowpath_importer_old.py
|
2
|
7800
|
"""Process provided DBF files into the database
Brian Gelder provides me a zipfile full of dbfs (one per HUC12). This script
consumes those dbf files and places them in the database.
python flowpath_importer.py <scenario> <path to dbf files in ../../data/>
"""
from __future__ import print_function
import glob
import os
import sys
import shapefile
from tqdm import tqdm
import pandas as pd
from pyiem.util import get_dbconn
print(" * BE CAREFUL! The dbf files may not be 5070, but 26915")
print(" * VERIFY IF POINT_X or X is the 5070 grid value")
print(" * This will generate a `myhucs.txt` file with found HUCs")
SCENARIO = int(sys.argv[1])
PREFIX = "fp"
TRUNC_GRIDORDER_AT = 4
PGCONN = get_dbconn("idep")
INSERT_SQL = """
INSERT into flowpath_points(flowpath, segid,
elevation, length, surgo, management, slope, geom,
landuse, scenario, gridorder)
values(%s, %s, %s, %s, %s, %s, %s,
ST_Transform(ST_Geomfromewkt('SRID=5070;POINT(%s %s)'), 5070),
%s, %s, %s)
"""
def get_flowpath(cursor, huc12, fpath):
"""Get or create a database flowpath identifier
Args:
cursor (psycopg2.cursor): database cursor
huc12 (str): HUC12 identifier
fpath (int): the flowpath id value for this HUC12
Returns:
int the value of this huc12 flowpath
"""
cursor.execute(
"""
SELECT fid from flowpaths where huc_12 = %s and fpath = %s
and scenario = %s
""",
(huc12, fpath, SCENARIO),
)
if cursor.rowcount == 0:
cursor.execute(
"""
INSERT into flowpaths(huc_12, fpath, scenario)
values (%s, %s, %s) RETURNING fid
""",
(huc12, fpath, SCENARIO),
)
return cursor.fetchone()[0]
def get_data(filename):
"""Converts a dbf file into a pandas dataframe
Args:
filename (str): The dbf filename to process
Returns:
pd.DataFrame with the dbf data included.
"""
# hack to read just dbf file
# https://github.com/GeospatialPython/pyshp/issues/35
dbf = shapefile.Reader(dbf=open(filename, "rb"))
rows = dbf.records()
fields = dbf.fields[1:]
field_names = [field[0] for field in fields]
df = pd.DataFrame(rows)
df.columns = field_names
return df
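# Illustrative call (the filename is hypothetical; real names end in the
# 12-digit HUC12 code plus ".dbf", as assumed by process() below):
#
#     df = get_data("fp071000040910.dbf")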
def delete_previous(cursor, huc12):
"""This file is the authority for the HUC12, so we cull previous content.
"""
cursor.execute(
"""
DELETE from flowpath_points p USING flowpaths f WHERE
p.scenario = %s and p.flowpath = f.fid and f.huc_12 = %s
and f.scenario = %s
""",
(SCENARIO, huc12, SCENARIO),
)
cursor.execute(
"""
DELETE from flowpaths WHERE
scenario = %s and huc_12 = %s
""",
(SCENARIO, huc12),
)
def process(cursor, filename, huc12df):
"""Processing of a HUC12's data into the database
Args:
cursor (psycopg2.cursor): database cursor
filename (str): the dbf filename
huc12df (pd.DataFrame): the dataframe containing the dbf data
Returns:
None
"""
# We get the huc12 code based on the filename
huc12 = filename[-16:-4]
delete_previous(cursor, huc12)
huc8 = huc12[:-4]
# the inbound dataframe has lots of data, one row per flowpath point
# We group the dataframe by the column which uses a PREFIX and the huc8
for flowpath, df in huc12df.groupby("%s%s" % (PREFIX, huc8)):
# never do flowpath zero!
if flowpath == 0:
continue
# Sort along the length column, which orders the points from top
# to bottom
df = df.sort_values("%sLen%s" % (PREFIX, huc8[:5]), ascending=True)
# Get or create the flowpathid from the database
fid = get_flowpath(cursor, huc12, flowpath)
# Remove any previous data for this flowpath
cursor.execute(
"""
DELETE from flowpath_points WHERE flowpath = %s
and scenario = %s
""",
(fid, SCENARIO),
)
linestring = []
sz = len(df.index)
for segid, (_, row) in enumerate(df.iterrows()):
if (segid + 1) == sz: # Last row!
# This effectively repeats the slope of the previous point
# by having a negative dx and negative dy below. <hack>
row2 = df.iloc[segid - 1]
else:
row2 = df.iloc[segid + 1]
dy = row["ep3m%s" % (huc8[:6],)] - row2["ep3m%s" % (huc8[:6],)]
dx = (
row2["%sLen%s" % (PREFIX, huc8[:5])]
- row["%sLen%s" % (PREFIX, huc8[:5])]
)
# gridorder = 4
# Some optional code here that was used for the grid order work
gridorder = row["gord_%s" % (huc8[:5],)]
if gridorder > TRUNC_GRIDORDER_AT:
continue
if dx == 0:
slope = 0
else:
slope = dy / dx
lu = row["CropRotatn"].strip()
# Don't allow points without a rotation
if lu == "":
continue
            # OK, be careful here. Presently, the 8 char field covers
            # 2010 thru 2017, so we rotate to cover the first and last years
# 2007 2011[1]
# 2008 2010[0]
# 2009 2011[1]
# 2018 2016[6]
# 2019 2017[7]
full_lu = "%s%s%s%s%s%s" % (lu[1], lu[0], lu[1], lu, lu[6], lu[7])
args = (
fid,
segid,
row["ep3m%s" % (huc8[:6],)] / 100.0,
row["%sLen%s" % (PREFIX, huc8[:5])] / 100.0,
row["gSSURGO"],
row["Management"],
slope,
row["POINT_X"],
row["POINT_Y"],
full_lu,
SCENARIO,
gridorder,
)
cursor.execute(INSERT_SQL, args)
linestring.append("%s %s" % (row["POINT_X"], row["POINT_Y"]))
# Line string must have at least 2 points
if len(linestring) > 1:
sql = """
UPDATE flowpaths SET geom =
ST_Transform(ST_GeomFromeWkt('SRID=4326;LINESTRING(%s)'), 5070)
WHERE fid = %s and scenario = %s
""" % (
",".join(linestring),
fid,
SCENARIO,
)
cursor.execute(sql)
else:
# Cull our work above if this flowpath is too short
cursor.execute(
"""
DELETE from flowpath_points
where flowpath = %s and scenario = %s
""",
(fid, SCENARIO),
)
cursor.execute(
"""
DELETE from flowpaths where fid = %s
and scenario = %s
""",
(fid, SCENARIO),
)
return huc12
def main():
"""Our main function, the starting point for code execution"""
# track our work
fp = open("myhucs.txt", "w")
# Change the working directory to where we have data files
os.chdir("../../data/%s" % (sys.argv[2],))
# collect up the dbfs in that directory
fns = glob.glob("*.dbf")
i = 0
cursor = PGCONN.cursor()
for fn in tqdm(fns):
# Save our work every 100 dbfs, so to keep the database transaction
# at a reasonable size
if i > 0 and i % 100 == 0:
PGCONN.commit()
cursor = PGCONN.cursor()
df = get_data(fn)
huc12 = process(cursor, fn, df)
fp.write("%s\n" % (huc12,))
i += 1
fp.close()
# Commit the database changes
cursor.close()
PGCONN.commit()
if __name__ == "__main__":
main()
|
mit
|
vkscool/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/dates.py
|
54
|
33991
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing
on the shoulders of python :mod:`datetime`, the add-on modules
:mod:`pytz` and :mod:`dateutil`. :class:`datetime` objects are
converted to floating point numbers which represent the number of days
since 0001-01-01 UTC. The helper functions :func:`date2num`,
:func:`num2date` and :func:`drange` are used to facilitate easy
conversion to and from :mod:`datetime` and numeric ranges.
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, eg MO, TU
* :class:`MonthLocator`: locate months, eg 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
  :class:`dateutil.rrule` (`dateutil
  <https://moin.conectiva.com.br/DateUtil>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
Date formatters
---------------
Here are all the date formatters:
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
import re, time, math, datetime
import pytz
# compatability for 2008c and older versions
try:
import pytz.zoneinfo
except ImportError:
pytz.zoneinfo = pytz.tzinfo
pytz.zoneinfo.UTC = pytz.UTC
import matplotlib
import numpy as np
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
from pytz import timezone
from dateutil.rrule import rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, \
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY
from dateutil.relativedelta import relativedelta
import dateutil.parser
__all__ = ( 'date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'DateLocator', 'RRuleLocator',
'YearLocator', 'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'rrule', 'MO', 'TU', 'WE', 'TH', 'FR',
'SA', 'SU', 'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
UTC = pytz.timezone('UTC')
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
return pytz.timezone(s)
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60.*HOURS_PER_DAY
SECONDS_PER_DAY = 60.*MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY + dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY + dt.microsecond/MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None: tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24*remainder, 1)
minute, remainder = divmod(60*remainder, 1)
second, remainder = divmod(60*remainder, 1)
microsecond = int(1e6*remainder)
if microsecond<10: microsecond=0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond>999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6-microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
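# Usage sketch for strpdate2num (the format string and date are illustrative):
#
#     to_num = strpdate2num('%Y-%m-%d')
#     x = to_num('2008-06-01')     # a matplotlib date number (float)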
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC.
"""
if not cbook.iterable(d): return _to_ordinalf(d)
else: return np.asarray([_to_ordinalf(val) for val in d])
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j): j = np.asarray(j)
return j + 1721425.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n): n = np.asarray(n)
return n - 1721425.5
def num2date(x, tz=None):
"""
*x* is a float value which gives number of days (fraction part
represents hours, minutes, seconds) since 0001-01-01 00:00:00 UTC.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None: tz = _get_rc_timezone()
if not cbook.iterable(x): return _from_ordinalf(x, tz)
else: return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds/SECONDS_PER_DAY +
delta.microseconds/MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
return np.arange(f1, f2, step)
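# For example (a minimal sketch; dstart is included, dend is excluded):
#
#     d1 = datetime.datetime(2008, 6, 1)
#     d2 = datetime.datetime(2008, 6, 8)
#     xs = drange(d1, d2, datetime.timedelta(days=1))   # 7 float ordinals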
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = self._findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return cbook.unicode_safe(s)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None: tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind>=len(self.t) or ind<=0: return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None):
self._locator = locator
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", tz)
self._tz = tz
def __call__(self, x, pos=0):
scale = float( self._locator._get_unit() )
if ( scale == 365.0 ):
self._formatter = DateFormatter("%Y", self._tz)
elif ( scale == 30.0 ):
self._formatter = DateFormatter("%b %Y", self._tz)
elif ( (scale == 1.0) or (scale == 7.0) ):
self._formatter = DateFormatter("%b %d %Y", self._tz)
elif ( scale == (1.0/24.0) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*60)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*3600)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
else:
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour':0, 'byminute':0,'bysecond':0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def nonsingular(self, vmin, vmax):
unit = self._get_unit()
vmin -= 2*unit
vmax += 2*unit
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try: dmin, dmax = self.viewlim_to_dt()
except ValueError: return []
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dates = self.rule.between(dmin, dmax, True)
return date2num(dates)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
if ( freq == YEARLY ):
return 365
elif ( freq == MONTHLY ):
return 30
elif ( freq == WEEKLY ):
return 7
elif ( freq == DAILY ):
return 1
elif ( freq == HOURLY ):
return (1.0/24.0)
elif ( freq == MINUTELY ):
return (1.0/(24*60))
elif ( freq == SECONDLY ):
return (1.0/(24*3600))
else:
# error
return -1 #or should this just return '1'?
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin: vmin=dmin
vmax = self.rule.after(dmax, True)
if not vmax: vmax=dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None):
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if ( self._freq == YEARLY ):
return 365.0
elif ( self._freq == MONTHLY ):
return 30.0
elif ( self._freq == WEEKLY ):
return 7.0
elif ( self._freq == DAILY ):
return 1.0
elif ( self._freq == HOURLY ):
return 1.0/24
elif ( self._freq == MINUTELY ):
return 1.0/(24*60)
elif ( self._freq == SECONDLY ):
return 1.0/(24*3600)
else:
# error
return -1
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if ( numYears >= numticks ):
self._freq = YEARLY
elif ( numMonths >= numticks ):
self._freq = MONTHLY
bymonth = range(1, 13)
if ( (0 <= numMonths) and (numMonths <= 14) ):
interval = 1 # show every month
elif ( (15 <= numMonths) and (numMonths <= 29) ):
interval = 3 # show every 3 months
elif ( (30 <= numMonths) and (numMonths <= 44) ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif ( numDays >= numticks ):
self._freq = DAILY
bymonth = None
bymonthday = range(1, 32)
if ( (0 <= numDays) and (numDays <= 9) ):
interval = 1 # show every day
elif ( (10 <= numDays) and (numDays <= 19) ):
interval = 2 # show every 2 days
elif ( (20 <= numDays) and (numDays <= 49) ):
interval = 3 # show every 3 days
elif ( (50 <= numDays) and (numDays <= 99) ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif ( numHours >= numticks ):
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range(0, 24) # show every hour
if ( (0 <= numHours) and (numHours <= 14) ):
interval = 1 # show every hour
elif ( (15 <= numHours) and (numHours <= 30) ):
interval = 2 # show every 2 hours
elif ( (30 <= numHours) and (numHours <= 45) ):
interval = 3 # show every 3 hours
elif ( (45 <= numHours) and (numHours <= 68) ):
interval = 4 # show every 4 hours
elif ( (68 <= numHours) and (numHours <= 90) ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif ( numMinutes >= numticks ):
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range(0, 60)
if ( numMinutes > (10.0 * numticks) ):
interval = 10
# end if
elif ( numSeconds >= numticks ):
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range(0, 60)
if ( numSeconds > (10.0 * numticks) ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval=interval, \
dtstart=dmin, until=dmax, \
bymonth=bymonth, bymonthday=bymonthday, \
byhour=byhour, byminute = byminute, \
bysecond=bysecond )
locator = RRuleLocator(rrule, self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = { 'month' : month,
'day' : day,
'hour' : 0,
'minute' : 0,
'second' : 0,
'tzinfo' : tz
}
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 365
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year>=ymax: return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g. 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
    example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None: bymonth=range(1,13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 30
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
    SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 7
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None: bymonthday=range(1,32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None: byhour=range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
        Return how many days a unit of the locator is; used for
        intelligent autoscaling.
"""
return 1/24.
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None: byminute=range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None: bysecond=range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60*60)
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2-d1
mus = abs(delta.days*MUSECONDS_PER_DAY + delta.seconds*1e6 +
delta.microseconds)
assert(mus<epsilon)
def _close_to_num(o1, o2, epsilon=5):
'Assert that float ordinals *o1* and *o2* are within *epsilon* microseconds.'
delta = abs((o2-o1)*MUSECONDS_PER_DAY)
assert(delta<epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24.*3600.
return 719163 + np.asarray(e)/spd
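# Sanity check (a minimal sketch): the Unix epoch itself maps to the proleptic
# Gregorian ordinal of 1970-01-01:
#
#     epoch2num(0) == 719163 == datetime.date(1970, 1, 1).toordinal()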
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24.*3600.
return (np.asarray(d)-719163)*spd
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar: return ret[0]
else: return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span==0: span = 1/24.
minutes = span*24*60
hours = span*24
days = span
weeks = span/7.
months = span/31. # approx
years = span/365.
if years>numticks:
locator = YearLocator(int(years/numticks), tz=tz) # define
fmt = '%Y'
elif months>numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks>numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days>numticks:
locator = DayLocator(interval=int(math.ceil(days/numticks)), tz=tz)
fmt = '%b %d'
elif hours>numticks:
locator = HourLocator(interval=int(math.ceil(hours/numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes>numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes/numticks)), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
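# For example (a minimal sketch for a roughly month-long span):
#
#     locator, formatter = date_ticker_factory(30)
#     # then typically: ax.xaxis.set_major_locator(locator)
#     #                 ax.xaxis.set_major_formatter(formatter)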
def seconds(s):
'Return seconds as days.'
return float(s)/SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m)/MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h/24.
def weeks(w):
'Return weeks as days.'
return w*7.
class DateConverter(units.ConversionInterface):
def axisinfo(unit):
'return the unit AxisInfo'
if unit=='date':
majloc = AutoDateLocator()
majfmt = AutoDateFormatter(majloc)
return units.AxisInfo(
majloc = majloc,
majfmt = majfmt,
label='',
)
else: return None
axisinfo = staticmethod(axisinfo)
def convert(value, unit):
if units.ConversionInterface.is_numlike(value): return value
return date2num(value)
convert = staticmethod(convert)
def default_units(x):
'Return the default unit for *x* or None'
return 'date'
default_units = staticmethod(default_units)
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
if __name__=='__main__':
#tz = None
tz = pytz.timezone('US/Pacific')
#tz = UTC
dt = datetime.datetime(1011, 10, 9, 13, 44, 22, 101010, tzinfo=tz)
x = date2num(dt)
_close_to_dt(dt, num2date(x, tz))
#tz = _get_rc_timezone()
d1 = datetime.datetime( 2000, 3, 1, tzinfo=tz)
d2 = datetime.datetime( 2000, 3, 5, tzinfo=tz)
#d1 = datetime.datetime( 2002, 1, 5, tzinfo=tz)
#d2 = datetime.datetime( 2003, 12, 1, tzinfo=tz)
delta = datetime.timedelta(hours=6)
dates = drange(d1, d2, delta)
# MGDTODO: Broken on transforms branch
#print 'orig', d1
#print 'd2n and back', num2date(date2num(d1), tz)
from _transforms import Value, Interval
v1 = Value(date2num(d1))
v2 = Value(date2num(d2))
dlim = Interval(v1,v2)
vlim = Interval(v1,v2)
#locator = HourLocator(byhour=(3,15), tz=tz)
#locator = MinuteLocator(byminute=(15,30,45), tz=tz)
#locator = YearLocator(base=5, month=7, day=4, tz=tz)
#locator = MonthLocator(bymonthday=15)
locator = DayLocator(tz=tz)
locator.set_data_interval(dlim)
locator.set_view_interval(vlim)
dmin, dmax = locator.autoscale()
vlim.set_bounds(dmin, dmax)
ticks = locator()
fmt = '%Y-%m-%d %H:%M:%S %Z'
formatter = DateFormatter(fmt, tz)
#for t in ticks: print formatter(t)
for t in dates: print formatter(t)
|
gpl-3.0
|
felipebetancur/scipy
|
scipy/cluster/hierarchy.py
|
14
|
91850
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        # pnts * (pnts - 1) is always even; use floor division so the size stays
        # an integer under the module-level "from __future__ import division".
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
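# For example (a minimal sketch on a tiny observation matrix):
#
#     from scipy.spatial.distance import pdist
#     X = np.array([[0., 0.], [0., 1.], [5., 5.]])
#     Z = single(pdist(X))     # (n - 1) x 4 linkage matrix, here 2 x 4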
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
        for all points :math:`i` in cluster :math:`u` and :math:`j` in
        cluster :math:`v`. This is also known as the Farthest Point
        Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
        into a new cluster :math:`u`, the average of centroids s and t
        gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
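    Examples
    --------
    A minimal usage sketch; the condensed distances below are synthetic
    values chosen purely for illustration (a real ``y`` would normally
    come from ``distance.pdist``):
    >>> y = [2., 3., 4., 5., 6., 7.]   # condensed distances for 4 points
    >>> Z = linkage(y, method='average')
    >>> Z.shape
    (3, 4)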
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
    else:
        raise ValueError("Linkage input y must be 1-D (a condensed distance "
                         "matrix) or 2-D (an observation matrix).")
    return Z
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
        The number of leaf nodes (original observations) belonging to
        the target node. If the target node is a leaf, 1 is
        returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    tree : ClusterNode or tuple
        A reference to the root ClusterNode object. If ``rd`` is True,
        a tuple ``(r, d)`` is returned instead, as described above.
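    Examples
    --------
    A minimal usage sketch built on a synthetic condensed distance
    matrix (the values are illustrative only):
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> root = to_tree(Z)
    >>> root.get_count()
    4
    >>> sorted(root.pre_order())
    [0, 1, 2, 3]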
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
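    Examples
    --------
    A minimal usage sketch with synthetic condensed distances (the values
    are illustrative only):
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> d = cophenet(Z)
    >>> d.shape
    (6,)
    >>> c, d = cophenet(Z, y)   # also compute the correlation coefficient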
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
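    Examples
    --------
    A minimal usage sketch with synthetic condensed distances (the values
    are illustrative only):
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> R = inconsistent(Z)
    >>> R.shape
    (3, 4)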
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
     * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
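    Examples
    --------
    A minimal sketch using a hand-written MATLAB(TM)-style linkage for
    four observations (the distances are illustrative only):
    >>> ZM = [[1, 2, 0.7], [3, 4, 0.9], [5, 6, 1.4]]
    >>> ZS = from_mlab_linkage(ZM)
    >>> ZS.shape
    (3, 4)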
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
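    Examples
    --------
    A minimal sketch; single linkage always yields a monotonic linkage,
    and the synthetic distances below are illustrative only:
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> bool(is_monotonic(Z))
    True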
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # We expect each merge distance to be at least as large as the
    # previous one, i.e. Z[:, 2] is non-decreasing.
    return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
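    Examples
    --------
    A minimal sketch with a linkage built from synthetic condensed
    distances (the values are illustrative only):
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> is_valid_linkage(Z)
    True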
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
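    Examples
    --------
    A minimal sketch with synthetic condensed distances (the values are
    illustrative only):
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> correspond(Z, y)
    True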
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
                fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
                fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
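    Examples
    --------
    A minimal usage sketch with synthetic condensed distances; the
    threshold value is illustrative only:
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> T = fcluster(Z, t=3.5, criterion='distance')
    >>> len(T)
    4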
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
    Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions), using the given
    distance metric to calculate pairwise distances between original
    observations, performs hierarchical clustering using the given
    linkage method, and forms flat clusters using the given criterion
    with `t` as the cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
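    Examples
    --------
    A minimal usage sketch; the observation matrix and threshold below
    are made up purely for illustration:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [5., 5.]]
    >>> T = fclusterdata(X, t=1.5, criterion='distance')
    >>> len(T)
    4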
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
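    Examples
    --------
    A minimal sketch with synthetic condensed distances (the values are
    illustrative only):
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> order = leaves_list(Z)
    >>> len(order)
    4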
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
            if leaf_rotation:
                # map() is lazy under Python 3, so apply the label
                # settings with explicit loops.
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            else:
                leaf_rot = float(_get_tick_rotation(len(ivl)))
                for lbl in lbls:
                    lbl.set_rotation(leaf_rot)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
            else:
                leaf_fs = float(_get_tick_text_size(len(ivl)))
                for lbl in lbls:
                    lbl.set_size(leaf_fs)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            else:
                leaf_rot = float(_get_tick_rotation(p))
                for lbl in lbls:
                    lbl.set_rotation(leaf_rot)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
            else:
                leaf_fs = float(_get_tick_text_size(p))
                for lbl in lbls:
                    lbl.set_size(leaf_fs)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
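    Examples
    --------
    A minimal sketch; the palette entries are arbitrary matplotlib color
    codes chosen only for illustration:
    >>> set_link_color_palette(['m', 'c', 'y', 'k'])
    >>> set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])  # restore default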
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with
        the cluster index :math:`k < 2n-1` of each leaf. The function
        is expected to return a string with the label for that
        leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
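    Examples
    --------
    A minimal sketch that only computes the plotting data structures
    (``no_plot=True`` avoids any matplotlib dependency); the condensed
    distances are synthetic and purely illustrative:
    >>> y = [2., 3., 4., 5., 6., 7.]
    >>> Z = linkage(y, method='single')
    >>> R = dendrogram(Z, no_plot=True)
    >>> len(R['ivl'])
    4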
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
      * left is the independent variable coordinate of the center of
        the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
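# Hedged example (not part of the original module): a minimal sketch showing how
# is_isomorphic treats relabeled flat cluster assignments. The helper name
# _example_is_isomorphic is an illustrative addition and is never called here.
def _example_is_isomorphic():
    # [1, 1, 2, 3] and [2, 2, 3, 1] describe the same partition under the
    # relabeling 1->2, 2->3, 3->1, so they are reported as equivalent.
    assert is_isomorphic([1, 1, 2, 3], [2, 2, 3, 1])
    # [1, 2, 2, 3] groups the observations differently, so it is not.
    assert not is_isomorphic([1, 1, 2, 3], [1, 2, 2, 3])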
def maxdists(Z):
"""
    Returns, for each non-singleton cluster, the maximum linkage distance among all clusters below and including it.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
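# Hedged example (illustrative sketch, not in the original module; assumes the
# linkage() function defined earlier in this module is in scope): for single
# linkage over the 1-D points 0, 1 and 10, the merges happen at distances 1.0
# and 9.0, so maxdists(Z) is expected to be approximately [1.0, 9.0].
def _example_maxdists():
    Z = linkage([[0.0], [1.0], [10.0]], method='single')
    return maxdists(Z)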
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
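# Hedged usage sketch (not in the original module; assumes linkage() and
# inconsistent() defined earlier in this module): maxinconsts is normally fed
# the inconsistency matrix computed from the same linkage.
def _example_maxinconsts():
    Z = linkage([[0.0], [1.0], [3.0], [10.0]], method='single')
    R = inconsistent(Z)
    # One maximum inconsistency coefficient per non-singleton cluster.
    return maxinconsts(Z, R)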
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
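# Hedged usage sketch (not in the original module): since maxinconsts above
# reads column 3 of R, maxRstat(Z, R, 3) reproduces the same values; other
# columns give the maxima of the remaining inconsistency statistics.
def _example_maxRstat():
    Z = linkage([[0.0], [1.0], [3.0], [10.0]], method='single')
    R = inconsistent(Z)
    return maxRstat(Z, R, 3), maxinconsts(Z, R)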
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
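# Hedged usage sketch (not in the original module; assumes linkage() and
# fcluster() defined earlier in this module): cutting a small single-linkage
# tree at distance 5 produces two flat clusters, and leaders() reports the
# linkage node id leading each of them.
def _example_leaders():
    Z = linkage([[0.0], [1.0], [10.0]], method='single')
    # leaders() requires an int32 ('i') assignment vector, which fcluster returns.
    T = np.asarray(fcluster(Z, 5, criterion='distance'), dtype='i')
    return leaders(Z, T)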
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
|
bsd-3-clause
|
datapythonista/pandas
|
pandas/tests/frame/methods/test_clip.py
|
1
|
6885
|
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestDataFrameClip:
def test_clip(self, float_frame):
median = float_frame.median().median()
original = float_frame.copy()
double = float_frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
# Verify that float_frame was not changed inplace
assert (float_frame.values == original.values).all()
def test_inplace_clip(self, float_frame):
# GH#15388
median = float_frame.median().median()
frame_copy = float_frame.copy()
return_value = frame_copy.clip(upper=median, lower=median, inplace=True)
assert return_value is None
assert not (frame_copy.values != median).any()
def test_dataframe_clip(self):
# GH#2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
assert (clipped_df.values[lb_mask] == lb).all()
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
def test_clip_mixed_numeric(self):
# TODO(jreback)
# clip on mixed integer or floats
# with integer clippers coerces to float
df = DataFrame({"A": [1, 2, 3], "B": [1.0, np.nan, 3.0]})
result = df.clip(1, 2)
expected = DataFrame({"A": [1, 2, 2], "B": [1.0, np.nan, 2.0]})
tm.assert_frame_equal(result, expected, check_like=True)
# GH#24162, clipping now preserves numeric types per column
df = DataFrame([[1, 2, 3.4], [3, 4, 5.6]], columns=["foo", "bar", "baz"])
expected = df.dtypes
result = df.clip(upper=3).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
# GH#6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
original = df.copy()
clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
if inplace:
clipped_df = df
for i in range(2):
lb_mask = original.iloc[:, i] <= lb
ub_mask = original.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
tm.assert_series_equal(result, lb[lb_mask], check_names=False)
assert result.name == i
result = clipped_df.loc[ub_mask, i]
tm.assert_series_equal(result, ub[ub_mask], check_names=False)
assert result.name == i
tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize(
"axis,res",
[
(0, [[2.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 7.0, 7.0]]),
(1, [[2.0, 3.0, 4.0], [4.0, 5.0, 6.0], [5.0, 6.0, 7.0]]),
],
)
def test_clip_against_list_like(self, simple_frame, inplace, lower, axis, res):
# GH#15390
original = simple_frame.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7], axis=axis, inplace=inplace)
expected = DataFrame(res, columns=original.columns, index=original.index)
if inplace:
result = original
tm.assert_frame_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=axis)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
def test_clip_against_unordered_columns(self):
# GH#20911
df1 = DataFrame(np.random.randn(1000, 4), columns=["A", "B", "C", "D"])
df2 = DataFrame(np.random.randn(1000, 4), columns=["D", "A", "B", "C"])
df3 = DataFrame(df2.values - 1, columns=["B", "D", "C", "A"])
result_upper = df1.clip(lower=0, upper=df2)
expected_upper = df1.clip(lower=0, upper=df2[df1.columns])
result_lower = df1.clip(lower=df3, upper=3)
expected_lower = df1.clip(lower=df3[df1.columns], upper=3)
result_lower_upper = df1.clip(lower=df3, upper=df2)
expected_lower_upper = df1.clip(lower=df3[df1.columns], upper=df2[df1.columns])
tm.assert_frame_equal(result_upper, expected_upper)
tm.assert_frame_equal(result_lower, expected_lower)
tm.assert_frame_equal(result_lower_upper, expected_lower_upper)
def test_clip_with_na_args(self, float_frame):
"""Should process np.nan argument as None """
# GH#17276
tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame)
# GH#19992 and adjusted in GH#40420
df = DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]})
result = df.clip(lower=[4, 5, np.nan], axis=0)
expected = DataFrame(
{"col_0": [4, 5, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]}
)
tm.assert_frame_equal(result, expected)
result = df.clip(lower=[4, 5, np.nan], axis=1)
expected = DataFrame(
{"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [7, 8, 9]}
)
tm.assert_frame_equal(result, expected)
# GH#40420
data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]}
df = DataFrame(data)
t = Series([2, -4, np.NaN, 6, 3])
result = df.clip(lower=t, axis=0)
expected = DataFrame({"col_0": [9, -3, 0, 6, 5], "col_1": [2, -4, 6, 8, 3]})
tm.assert_frame_equal(result, expected)
def test_clip_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame({"a": [1, 2, 3]})
msg = (
r"In a future version of pandas all arguments of DataFrame.clip except "
r"for the arguments 'lower' and 'upper' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.clip(0, 1, 0)
expected = DataFrame({"a": [1, 1, 1]})
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
treycausey/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
8
|
3318
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut to the volume of the region.
As the algorithm tries to balance the volume (i.e., balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
pl.matshow(img)
pl.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
pl.matshow(img)
pl.matshow(label_im)
pl.show()
|
bsd-3-clause
|
cython-testbed/pandas
|
scripts/announce.py
|
7
|
3592
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
Script to generate contributor and pull request lists
This script generates contributor and pull request lists for release
announcements using Github v3 protocol. Use requires an authentication token in
order to have sufficient bandwidth; you can get one by following the directions at
`<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_
Don't add any scope, as the default is read access to public information. The
token may be stored in an environment variable as you only get one chance to
see it.
Usage::
$ ./scripts/announce.py <token> <revision range>
The output is utf8 rst.
Dependencies
------------
- gitpython
- pygithub
Some code was copied from scipy `tools/gh_lists.py` and `tools/authors.py`.
Examples
--------
From the bash command line with $GITHUB token.
$ ./scripts/announce.py $GITHUB v1.11.0..v1.11.1 > announce.rst
"""
from __future__ import print_function, division
import os
import re
import codecs
from git import Repo
UTF8Writer = codecs.getwriter('utf8')
this_repo = Repo(os.path.join(os.path.dirname(__file__), ".."))
author_msg = """\
A total of %d people contributed to this release. People with a "+" by their
names contributed a patch for the first time.
"""
pull_request_msg = """\
A total of %d pull requests were merged for this release.
"""
def get_authors(revision_range):
pat = u'^.*\\t(.*)$'
lst_release, cur_release = [r.strip() for r in revision_range.split('..')]
# authors, in current release and previous to current release.
cur = set(re.findall(pat, this_repo.git.shortlog('-s', revision_range),
re.M))
pre = set(re.findall(pat, this_repo.git.shortlog('-s', lst_release),
re.M))
# Homu is the author of auto merges, clean him out.
cur.discard('Homu')
pre.discard('Homu')
# Append '+' to new authors.
authors = [s + u' +' for s in cur - pre] + [s for s in cur & pre]
authors.sort()
return authors
def get_pull_requests(repo, revision_range):
prnums = []
# From regular merges
merges = this_repo.git.log(
'--oneline', '--merges', revision_range)
issues = re.findall(u"Merge pull request \\#(\\d*)", merges)
prnums.extend(int(s) for s in issues)
# From Homu merges (Auto merges)
    issues = re.findall(u"Auto merge of \\#(\\d*)", merges)
prnums.extend(int(s) for s in issues)
# From fast forward squash-merges
commits = this_repo.git.log(
'--oneline', '--no-merges', '--first-parent', revision_range)
issues = re.findall(u'^.*\\(\\#(\\d+)\\)$', commits, re.M)
prnums.extend(int(s) for s in issues)
# get PR data from github repo
prnums.sort()
prs = [repo.get_pull(n) for n in prnums]
return prs
def main(revision_range, repo):
lst_release, cur_release = [r.strip() for r in revision_range.split('..')]
# document authors
authors = get_authors(revision_range)
heading = u"Contributors"
print()
print(heading)
print(u"=" * len(heading))
print(author_msg % len(authors))
for s in authors:
print(u'* ' + s)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Generate author lists for release")
parser.add_argument('revision_range', help='<revision>..<revision>')
parser.add_argument('--repo', help="Github org/repository",
default="pandas-dev/pandas")
args = parser.parse_args()
main(args.revision_range, args.repo)
|
bsd-3-clause
|
cmu-delphi/delphi-epidata
|
integrations/server/test_covidcast_endpoints.py
|
1
|
16599
|
"""Integration tests for the custom `covidcast/*` endpoints."""
# standard library
from typing import Iterable, Dict, Any
import unittest
from io import StringIO
# from typing import Optional
from dataclasses import dataclass
# third party
import mysql.connector
import requests
import pandas as pd
from delphi_utils import Nans
from delphi.epidata.acquisition.covidcast.covidcast_meta_cache_updater import main as update_cache
# use the local instance of the Epidata API
BASE_URL = "http://delphi_web_epidata/epidata/covidcast"
@dataclass
class CovidcastRow:
id: int = 0
source: str = "src"
signal: str = "sig"
time_type: str = "day"
geo_type: str = "county"
time_value: int = 20200411
geo_value: str = "01234"
value_updated_timestamp: int = 20200202
value: float = 10.0
stderr: float = 0
sample_size: float = 10
direction_updated_timestamp: int = 20200202
direction: int = 0
issue: int = 20200202
lag: int = 0
is_latest_issue: bool = True
is_wip: bool = False
missing_value: int = Nans.NOT_MISSING
missing_stderr: int = Nans.NOT_MISSING
missing_sample_size: int = Nans.NOT_MISSING
def __str__(self):
return f"""(
{self.id},
'{self.source}',
'{self.signal}',
'{self.time_type}',
'{self.geo_type}',
{self.time_value},
'{self.geo_value}',
{self.value_updated_timestamp},
{self.value},
{self.stderr},
{self.sample_size},
{self.direction_updated_timestamp},
{self.direction},
{self.issue},
{self.lag},
{self.is_latest_issue},
{self.is_wip},
{self.missing_value},
{self.missing_stderr},
{self.missing_sample_size}
)"""
@staticmethod
def from_json(json: Dict[str, Any]) -> "CovidcastRow":
return CovidcastRow(
source=json["source"],
signal=json["signal"],
time_type=json["time_type"],
geo_type=json["geo_type"],
geo_value=json["geo_value"],
direction=json["direction"],
issue=json["issue"],
lag=json["lag"],
value=json["value"],
stderr=json["stderr"],
sample_size=json["sample_size"],
missing_value=json["missing_value"],
missing_stderr=json["missing_stderr"],
missing_sample_size=json["missing_sample_size"],
)
@property
def signal_pair(self):
return f"{self.source}:{self.signal}"
@property
def geo_pair(self):
return f"{self.geo_type}:{self.geo_value}"
@property
def time_pair(self):
return f"{self.time_type}:{self.time_value}"
class CovidcastEndpointTests(unittest.TestCase):
"""Tests the `covidcast/*` endpoint."""
def setUp(self):
"""Perform per-test setup."""
# connect to the `epidata` database and clear the `covidcast` table
cnx = mysql.connector.connect(user="user", password="pass", host="delphi_database_epidata", database="epidata")
cur = cnx.cursor()
cur.execute("truncate table covidcast")
cur.execute('update covidcast_meta_cache set timestamp = 0, epidata = ""')
cnx.commit()
cur.close()
# make connection and cursor available to test cases
self.cnx = cnx
self.cur = cnx.cursor()
def tearDown(self):
"""Perform per-test teardown."""
self.cur.close()
self.cnx.close()
def _insert_rows(self, rows: Iterable[CovidcastRow]):
sql = ",\n".join((str(r) for r in rows))
self.cur.execute(
f"""
INSERT INTO
`covidcast` (`id`, `source`, `signal`, `time_type`, `geo_type`,
`time_value`, `geo_value`, `value_updated_timestamp`,
`value`, `stderr`, `sample_size`, `direction_updated_timestamp`,
`direction`, `issue`, `lag`, `is_latest_issue`, `is_wip`,`missing_value`,
`missing_stderr`,`missing_sample_size`)
VALUES
{sql}
"""
)
self.cnx.commit()
return rows
def _fetch(self, endpoint="/", **params):
# make the request
response = requests.get(
f"{BASE_URL}{endpoint}",
params=params,
)
response.raise_for_status()
return response.json()
def test_basic(self):
"""Request a signal the / endpoint."""
rows = [CovidcastRow(time_value=20200401 + i, value=i) for i in range(10)]
first = rows[0]
self._insert_rows(rows)
with self.subTest("validation"):
out = self._fetch("/")
self.assertEqual(out["result"], -1)
with self.subTest("simple"):
out = self._fetch("/", signal=first.signal_pair, geo=first.geo_pair, time="day:*")
self.assertEqual(len(out["epidata"]), len(rows))
def test_trend(self):
"""Request a signal the /trend endpoint."""
num_rows = 30
rows = [CovidcastRow(time_value=20200401 + i, value=i) for i in range(num_rows)]
first = rows[0]
last = rows[-1]
ref = rows[num_rows // 2]
self._insert_rows(rows)
out = self._fetch("/trend", signal=first.signal_pair, geo=first.geo_pair, date=last.time_value, window="20200401-20201212", basis=ref.time_value)
self.assertEqual(out["result"], 1)
self.assertEqual(len(out["epidata"]), 1)
trend = out["epidata"][0]
self.assertEqual(trend["geo_type"], last.geo_type)
self.assertEqual(trend["geo_value"], last.geo_value)
self.assertEqual(trend["signal_source"], last.source)
self.assertEqual(trend["signal_signal"], last.signal)
self.assertEqual(trend["date"], last.time_value)
self.assertEqual(trend["value"], last.value)
self.assertEqual(trend["basis_date"], ref.time_value)
self.assertEqual(trend["basis_value"], ref.value)
self.assertEqual(trend["basis_trend"], "increasing")
self.assertEqual(trend["min_date"], first.time_value)
self.assertEqual(trend["min_value"], first.value)
self.assertEqual(trend["min_trend"], "increasing")
self.assertEqual(trend["max_date"], last.time_value)
self.assertEqual(trend["max_value"], last.value)
self.assertEqual(trend["max_trend"], "steady")
def test_trendseries(self):
"""Request a signal the /trendseries endpoint."""
num_rows = 3
rows = [CovidcastRow(time_value=20200401 + i, value=num_rows - i) for i in range(num_rows)]
first = rows[0]
last = rows[-1]
self._insert_rows(rows)
out = self._fetch("/trendseries", signal=first.signal_pair, geo=first.geo_pair, date=last.time_value, window="20200401-20200410", basis=1)
self.assertEqual(out["result"], 1)
self.assertEqual(len(out["epidata"]), 3)
trends = out["epidata"]
def match_row(trend, row):
self.assertEqual(trend["geo_type"], row.geo_type)
self.assertEqual(trend["geo_value"], row.geo_value)
self.assertEqual(trend["signal_source"], row.source)
self.assertEqual(trend["signal_signal"], row.signal)
self.assertEqual(trend["date"], row.time_value)
self.assertEqual(trend["value"], row.value)
with self.subTest("trend0"):
trend = trends[0]
match_row(trend, first)
self.assertEqual(trend["basis_date"], None)
self.assertEqual(trend["basis_value"], None)
self.assertEqual(trend["basis_trend"], "unknown")
self.assertEqual(trend["min_date"], last.time_value)
self.assertEqual(trend["min_value"], last.value)
self.assertEqual(trend["min_trend"], "increasing")
self.assertEqual(trend["max_date"], first.time_value)
self.assertEqual(trend["max_value"], first.value)
self.assertEqual(trend["max_trend"], "steady")
with self.subTest("trend1"):
trend = trends[1]
match_row(trend, rows[1])
self.assertEqual(trend["basis_date"], first.time_value)
self.assertEqual(trend["basis_value"], first.value)
self.assertEqual(trend["basis_trend"], "decreasing")
self.assertEqual(trend["min_date"], last.time_value)
self.assertEqual(trend["min_value"], last.value)
self.assertEqual(trend["min_trend"], "increasing")
self.assertEqual(trend["max_date"], first.time_value)
self.assertEqual(trend["max_value"], first.value)
self.assertEqual(trend["max_trend"], "decreasing")
with self.subTest("trend2"):
trend = trends[2]
match_row(trend, last)
self.assertEqual(trend["basis_date"], rows[1].time_value)
self.assertEqual(trend["basis_value"], rows[1].value)
self.assertEqual(trend["basis_trend"], "decreasing")
self.assertEqual(trend["min_date"], last.time_value)
self.assertEqual(trend["min_value"], last.value)
self.assertEqual(trend["min_trend"], "steady")
self.assertEqual(trend["max_date"], first.time_value)
self.assertEqual(trend["max_value"], first.value)
self.assertEqual(trend["max_trend"], "decreasing")
def test_correlation(self):
"""Request a signal the /correlation endpoint."""
num_rows = 30
reference_rows = [CovidcastRow(signal="ref", time_value=20200401 + i, value=i) for i in range(num_rows)]
first = reference_rows[0]
self._insert_rows(reference_rows)
other_rows = [CovidcastRow(signal="other", time_value=20200401 + i, value=i) for i in range(num_rows)]
other = other_rows[0]
self._insert_rows(other_rows)
max_lag = 3
out = self._fetch("/correlation", reference=first.signal_pair, others=other.signal_pair, geo=first.geo_pair, window="20200401-20201212", lag=max_lag)
self.assertEqual(out["result"], 1)
df = pd.DataFrame(out["epidata"])
self.assertEqual(len(df), max_lag * 2 + 1) # -...0...+
self.assertEqual(df["geo_type"].unique().tolist(), [first.geo_type])
self.assertEqual(df["geo_value"].unique().tolist(), [first.geo_value])
self.assertEqual(df["signal_source"].unique().tolist(), [other.source])
self.assertEqual(df["signal_signal"].unique().tolist(), [other.signal])
self.assertEqual(df["lag"].tolist(), list(range(-max_lag, max_lag + 1)))
self.assertEqual(df["r2"].unique().tolist(), [1.0])
self.assertEqual(df["slope"].unique().tolist(), [1.0])
self.assertEqual(df["intercept"].tolist(), [3.0, 2.0, 1.0, 0.0, -1.0, -2.0, -3.0])
self.assertEqual(df["samples"].tolist(), [num_rows - abs(l) for l in range(-max_lag, max_lag + 1)])
def test_csv(self):
"""Request a signal the /csv endpoint."""
rows = [CovidcastRow(time_value=20200401 + i, value=i) for i in range(10)]
first = rows[0]
self._insert_rows(rows)
response = requests.get(
f"{BASE_URL}/csv",
params=dict(signal=first.signal_pair, start_day="2020-04-01", end_day="2020-12-12", geo_type=first.geo_type),
)
response.raise_for_status()
out = response.text
df = pd.read_csv(StringIO(out), index_col=0)
self.assertEqual(df.shape, (len(rows), 10))
self.assertEqual(list(df.columns), ["geo_value", "signal", "time_value", "issue", "lag", "value", "stderr", "sample_size", "geo_type", "data_source"])
def test_backfill(self):
"""Request a signal the /backfill endpoint."""
num_rows = 10
issue_0 = [CovidcastRow(time_value=20200401 + i, value=i, sample_size=1, lag=0, issue=20200401 + i, is_latest_issue=False) for i in range(num_rows)]
issue_1 = [CovidcastRow(time_value=20200401 + i, value=i + 1, sample_size=2, lag=1, issue=20200401 + i + 1, is_latest_issue=False) for i in range(num_rows)]
last_issue = [CovidcastRow(time_value=20200401 + i, value=i + 2, sample_size=3, lag=2, issue=20200401 + i + 2, is_latest_issue=True) for i in range(num_rows)]
self._insert_rows([*issue_0, *issue_1, *last_issue])
first = issue_0[0]
out = self._fetch("/backfill", signal=first.signal_pair, geo=first.geo_pair, time="day:20200401-20201212", anchor_lag=3)
self.assertEqual(out["result"], 1)
df = pd.DataFrame(out["epidata"])
self.assertEqual(len(df), 3 * num_rows) # num issues
self.assertEqual(df["time_value"].unique().tolist(), [l.time_value for l in last_issue])
# check first time point only
df_t0 = df[df["time_value"] == first.time_value]
self.assertEqual(len(df_t0), 3) # num issues
self.assertEqual(df_t0["issue"].tolist(), [issue_0[0].issue, issue_1[0].issue, last_issue[0].issue])
self.assertEqual(df_t0["value"].tolist(), [issue_0[0].value, issue_1[0].value, last_issue[0].value])
self.assertEqual(df_t0["sample_size"].tolist(), [issue_0[0].sample_size, issue_1[0].sample_size, last_issue[0].sample_size])
self.assertEqual(df_t0["value_rel_change"].astype("str").tolist(), ["nan", "1.0", "1.0"])
self.assertEqual(df_t0["sample_size_rel_change"].astype("str").tolist(), ["nan", "1.0", "0.5"]) #
self.assertEqual(df_t0["is_anchor"].tolist(), [False, False, True])
self.assertEqual(df_t0["value_completeness"].tolist(), [0 / 2, 1 / 2, 2 / 2]) # total 2, given 0,1,2
self.assertEqual(df_t0["sample_size_completeness"].tolist(), [1 / 3, 2 / 3, 3 / 3]) # total 2, given 0,1,2
def test_meta(self):
"""Request a signal the /meta endpoint."""
num_rows = 10
rows = [CovidcastRow(time_value=20200401 + i, value=i) for i in range(num_rows)]
self._insert_rows(rows)
first = rows[0]
last = rows[-1]
update_cache(args=None)
with self.subTest("plain"):
out = self._fetch("/meta")
self.assertEqual(len(out), 1)
stats = out[0]
self.assertEqual(stats["source"], first.source)
self.assertEqual(stats["signal"], first.signal)
self.assertEqual(stats["min_time"], first.time_value)
self.assertEqual(stats["max_time"], last.time_value)
self.assertEqual(stats["max_issue"], max(d.issue for d in rows))
self.assertTrue(first.geo_type in stats["geo_types"])
stats_g = stats["geo_types"][first.geo_type]
self.assertEqual(stats_g["min"], first.value)
self.assertEqual(stats_g["max"], last.value)
self.assertEqual(stats_g["mean"], sum(r.value for r in rows) / len(rows))
with self.subTest("filtered"):
out = self._fetch("/meta", signal=f"{first.source}:*")
self.assertEqual(len(out), 1)
self.assertEqual(out[0]["source"], first.source)
out = self._fetch("/meta", signal=f"{first.source}:X")
self.assertEqual(len(out), 0)
def test_coverage(self):
"""Request a signal the /coverage endpoint."""
num_geos_per_date = [10, 20, 30, 40, 44]
dates = [20200401 + i for i in range(len(num_geos_per_date))]
rows = [CovidcastRow(time_value=dates[i], value=i, geo_value=str(geo_value)) for i, num_geo in enumerate(num_geos_per_date) for geo_value in range(num_geo)]
self._insert_rows(rows)
first = rows[0]
with self.subTest("default"):
out = self._fetch("/coverage", signal=first.signal_pair, latest=dates[-1], format="json")
self.assertEqual(len(out), len(num_geos_per_date))
self.assertEqual([o["time_value"] for o in out], dates)
self.assertEqual([o["count"] for o in out], num_geos_per_date)
with self.subTest("specify window"):
out = self._fetch("/coverage", signal=first.signal_pair, window=f"{dates[0]}-{dates[1]}", format="json")
self.assertEqual(len(out), 2)
self.assertEqual([o["time_value"] for o in out], dates[:2])
self.assertEqual([o["count"] for o in out], num_geos_per_date[:2])
with self.subTest("invalid geo_type"):
out = self._fetch("/coverage", signal=first.signal_pair, geo_type="state", format="json")
self.assertEqual(len(out), 0)
|
mit
|
effigies/mne-python
|
mne/decoding/tests/test_ems.py
|
19
|
1969
|
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_equal, assert_raises
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn
from mne.decoding import compute_ems
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
"""Test event-matched spatial filters"""
raw = io.Raw(raw_fname, preload=False)
# create unequal number of events
events = read_events(event_name)
events[-2, 2] = 3
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
epochs.equalize_event_counts(epochs.event_id, copy=False)
assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
surrogates, filters, conditions = compute_ems(epochs)
assert_equal(list(set(conditions)), [1, 3])
events = read_events(event_name)
event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs.equalize_event_counts(epochs.event_id, copy=False)
n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
assert_raises(ValueError, compute_ems, epochs)
surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
assert_equal(n_expected, len(surrogates))
assert_equal(n_expected, len(conditions))
assert_equal(list(set(conditions)), [2, 3])
raw.close()
|
bsd-3-clause
|
abhisg/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
261
|
4490
|
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
|
bsd-3-clause
|
mwv/scikit-learn
|
benchmarks/bench_covertype.py
|
154
|
7296
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
bsd-3-clause
|
lioritan/Thesis
|
problems/techTCrun.py
|
1
|
5996
|
from numpy import *
#import bruteforce_propo as compete
import alg10_ficuslike as godfish
#import alg7 as compete
#import bruteforce_propo as compete
import yago
import string
import os
import time
from matplotlib.mlab import find
from tree_utils import *
import pydot
from alg10_ficuslike import ig_ratio
def feature_select_ig(trn, trn_lbl, tst, fraction):
ig_ratios=[(ig_ratio(trn[:,j], trn_lbl), j) for j in xrange(size(trn,1))]
sorted_features_by_ig= sorted(ig_ratios, reverse=True)[:int(size(trn,1)*fraction)]
idxs=[b for a,b in sorted_features_by_ig]
return trn[:, idxs], tst[:, idxs]
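# Hedged usage sketch (not part of the original experiment): keep the half of
# the columns of a tiny toy matrix with the highest information-gain ratio.
# The toy arrays below are assumptions for illustration only; the real run
# further down operates on the techTC datasets.
def _example_feature_select_ig():
    toy_trn = array([[0, 1, 0, 1], [1, 1, 0, 0], [0, 0, 1, 1], [1, 0, 1, 0]])
    toy_lbl = array([0, 0, 1, 1])
    toy_tst = array([[1, 1, 1, 1]])
    return feature_select_ig(toy_trn, toy_lbl, toy_tst, 0.5)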
if __name__=='__main__':
import cPickle as pickle
fptr=open('yago_relationss_smaller.pkl', 'rb')
relationss= pickle.load(fptr)
fptr.close()
print 'yago loaded'
# vgjfhjfhj.jfghf()
# relationss.pop('earth')
# relationss.pop('reverse_earth')
# removed=[]
# for key in relationss.keys():
# if key.startswith('reverse_') and max([len(x) for x in relationss[key].values()])>50:
# removed.append(key)
#
# for key in removed:
# relationss.pop(key)
# dfgdfgfgf.sfgsfg()
datasets=[]
for path,dirnames, filenames in os.walk('./techtc_processed_fixed/'):
filenames.sort()
for filename in filenames:
fptr=open(path+'/'+filename, 'rb')
datasets.append(pickle.load(fptr))
fptr.close()
#
res_7=[]
    errs_svm= zeros((100, 3, 19)) #19 -> feature-fraction settings; see the fraction list below (0.005,...,1.0)
errs_knn= zeros((100,3, 19))
errs_tree= zeros((100,3,19))
errs_svm_na1= zeros((100,3, 19))
errs_knn_na1= zeros((100,3, 19))
# errs_tree_na1= zeros((100,3))
feature_nums= zeros((100, 3))
feature_names_list= []
for count,((trn, trn_lbl),(tst,tst_lbl)) in enumerate(datasets):
print count
# if count >2:
# break
if count!=31 and count!=37 and count!=43 and count!=49 and count!=51 and count!=59 and count!=62 and count!=64 and count!=70:#each one goes different
#if count!=19 and count!=77 and count!=98 and count!=4 and count!=12 and count!=22 and count!=24 and count!=25:
continue
feature_name_trio= []
training,testing= array(trn, dtype=object), array(tst, dtype=object)
logfile1= open('results%d_log_rec0.txt'%(count), 'w')
logfile2= open('results%d_log_rec1.txt'%(count), 'w')
logfile3= open('results%d_log_rec2.txt'%(count), 'w')
logfiles=[logfile1, logfile2, logfile3]
for i in [7]: #switch back to [3,7] later. doesn't seem to matter much between 1/3/5 and 7/9(which are worse for tree)
for d in [0,1,2]:
blor= godfish.FeatureGenerationFromRDF(training, trn_lbl, relationss)
blor.generate_features(30*(2**2), d, i, logfiles[d], 10, 1)
logfiles[d].close()
trn, trn_lbl, tst, feature_names, feature_trees= blor.get_new_table(testing)
feature_name_trio.append(feature_names)
feature_nums[count, d]= len(blor.new_features)
for f_number,(rel,rec_tree) in enumerate(feature_trees):
print rel
with open('rec_tree_dot depth %d,dataset%d,num %d.dot'%(d,count,f_number+1), 'wb') as fptr:
fptr.write(make_graphviz_string(rec_tree))
#export_to_pdf(rec_tree, 'rec_tree%d,dataset%d.pdf'%(f_number+1,count))
for j,fraction in enumerate([0.005,0.0075,0.01,0.025,0.05,0.075,0.1,0.125,0.15,0.175,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]):
new_trn, new_tst= feature_select_ig(trn, trn_lbl, tst, fraction)
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
clf= SVC(kernel='linear', C=100)
clf.fit(new_trn, trn_lbl)
tst_predict= clf.predict(new_tst)
errs_svm[count, d, j]= mean(tst_predict!=tst_lbl)
clf= KNeighborsClassifier(n_neighbors=1)
clf.fit(new_trn, trn_lbl)
tst_predict= clf.predict(new_tst)
errs_knn[count, d, j]= mean(tst_predict!=tst_lbl)
clf=DecisionTreeClassifier(criterion='entropy', min_samples_split=2, random_state=0)
clf.fit(new_trn, trn_lbl)
tst_predict= clf.predict(new_tst)
errs_tree[count,d,j] = mean(tst_predict!=tst_lbl)
# #results:
# errs_tree[count, d]= mean(tst_predict!=tst_lbl)
# feature_name_trio.append(feature_names)
# feature_nums[count, d]= len(blor.new_features)
# new_trn[new_trn==-100]= -1
# clf=SVC(kernel='linear', C=100)
# clf.fit(new_trn, trn_lbl)
# errs_svm_na1[count, d, j]= mean(clf.predict(new_tst)!=tst_lbl)
# clf=KNeighborsClassifier(n_neighbors=1)
# clf.fit(new_trn, trn_lbl)
# errs_knn_na1[count, d, j]= mean(clf.predict(new_tst)!=tst_lbl)
# clf=DecisionTreeClassifier(criterion='entropy',min_samples_split=2, random_state=0)
# clf.fit(trn, trn_lbl)
# errs_tree_na1[count, d]= mean(clf.predict(tst)!=tst_lbl)
feature_names_list.append(feature_name_trio)
a=(errs_svm[count,:,:], errs_knn[count,:,:], errs_tree[count,:,:], feature_nums[count,:])
with open('results%d.pkl'%(count),'wb') as fptr:
pickle.dump(a, fptr, -1)
# with open('final_res.pkl','wb') as fptr:
# pickle.dump((errs_svm, errs_knn, errs_svm_na1, errs_knn_na1, feature_names_list, feature_nums), fptr, -1)
|
gpl-2.0
|
vsimonis/worm1
|
testLazySegVidOut.py
|
1
|
4695
|
'''
Created on Feb 6, 2014
@author: Valerie
'''
import io
import time
#import picamera
#from imgProc import imgProc
#from PIL import Image
import matplotlib
matplotlib.use("Agg")
from skimage import io as skio
from skimage import color
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import sys
#import threading
#from easyEBB import easyEBB
def tic():
return time.time()
def toc(t):
return time.time() - t
def rgb2grayV(I):
I = I.astype(float)
try:
np.size(I,2)
J = 1.0/3 * (I[:,:,0]+ I[:,:,1] + I[:,:,2]);
J = J.astype(int)
return J
    except (ValueError, IndexError):  # np.size raises IndexError for a 2-D array
print "Not a 3-D array"
return
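# Hedged sketch (illustrative only, not part of the original script): rgb2grayV
# averages the three colour channels; note that astype(int) truncates, so a
# result can land one below the exact channel mean.
def _example_rgb2grayV():
    tiny = np.zeros((1, 2, 3))
    tiny[0, 0, :] = (30, 60, 90)   # channel mean 60
    tiny[0, 1, :] = (10, 20, 30)   # channel mean 20
    return rgb2grayV(tiny)         # roughly [[60, 20]] up to truncation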
DURATION = 1000; #in ms
STEPX = 1/100 #pixels per step
STEPY = 1/100 #pixels per step
BOUNDX = 200; #pixels
BOUNDY = 200; #pixels
VIDLEN = 600; #in sec
PING = 0.5; #in sec
WINDOW = 10; #number of frames to average
i = 0
DPI = 800;
XPI = 1080
YPI = 1080
cap = cv2.VideoCapture('C:\\Users\\vsimonis\\Documents\\MATLAB\\WormTracker\\Media\\led_move1.avi')
#ret, frame = cap.read()
FFMpegWriter = animation.writers['ffmpeg']
metadata = dict(title='Estimating Worm', artist='Matplotlib',
comment='All maxima-minima, 200 bound, new refs, flipx-y 300 s')
writer = FFMpegWriter(fps=30, metadata=metadata)
fig = plt.figure(dpi = DPI)
startT = time.time() # start time
lastCheck = startT - PING # artificial last check
now = startT
ref = None;
#print "size ref: %d" % np.size(ref)
xds = [];
yds = [];
xr = 0;
yr = 0;
#print 'read'
#try:
with writer.saving(fig, "writer_test10.mp4", 100):
while True:
now = time.time()
#camera.wait_recording(1)
if now - lastCheck >= PING:
#print 'new image'
#print 'read'
ret, frame = cap.read()
# t = tic()
lastCheck = time.time()
#camera.capture(stream2, format='jpeg', use_video_port = True)
#stream2.seek(0)
#img = frame[:,:,1]
#print 'rgbGray'
img = rgb2grayV(frame).astype(float)
if np.size(ref) == 1:
#print 'new REFERENCE'
#print 'new ref'
ref = img;
pass
else:
#print 'img SUBTRACTION'
sub = img - ref.astype(float)
x, y = np.nonzero(sub == np.max(sub))
xr, yr = np.nonzero(sub == np.min(sub))
#i = np.argmin(sub)
#print i
#s = (np.size(sub, 0), np.size(sub,1))
#print s
#j = np.argmax(sub)
# print j
#x, y = np.unravel_index(i, s)
#x, y = np.unravel_index(j, s)
#print "x:\t%d\ty:\t%d" % (x, y)
#print "xr:\t%d\tyr:\t%d" % (xr, yr)
xds.append(x[0] - xr[0] )
yds.append(y[0] - yr[0] )
#print 'plot'
fig.clf()
ip = plt.imshow(sub, cmap = 'gray')
ip.set_clim(sub.min(), sub.max())
plt.scatter(y, x, c = 'r')
plt.scatter(yr, xr, c = 'b')
#plt.show()
#print xds
#print yds
#print "len: %d" % len(xds)
if (len(xds) > WINDOW):
xds.pop(0)
yds.pop(0)
#print 'update means'
#print xds
#print yds
mx = np.mean(xds)
my = np.mean(yds)
#print "mean x: %d y: %d" % (mx, my)
if abs(mx) > BOUNDX or abs(my) > BOUNDY:
#print "MOVE %d, %d" % (mx, my)
#print sub
#plt.clf()
#plt.imshow(sub, cmap = 'gray')
#plt.scatter(x, y, c = 'r')
#plt.scatter(xr, yr, c = 'b')
ref = None
#print 'write frames'
writer.grab_frame()
#plt.show()
#ref = None
#print "time: %f s" % toc(t)
print 'time elapsed: %f' % (now - startT)
if (now - startT >= VIDLEN) or ret == False:
print 'time elapsed: %f' % (now - startT)
print 'end video'
#cap.close()
sys.exit()
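# --- Editor's note: hedged summary sketch, for reference only (the script
# exits above before reaching this point). ---
# The main loop tracks motion by subtracting a stored reference frame from the
# current grayscale frame and taking the brightest / darkest pixels of the
# difference image as the "new" and "old" positions.  This never-called helper
# condenses that step; the name is illustrative.
def _frame_difference_step(img, ref):
    sub = img - ref.astype(float)
    x, y = np.nonzero(sub == np.max(sub))    # pixels that became brighter
    xr, yr = np.nonzero(sub == np.min(sub))  # pixels that became darker
    return x[0] - xr[0], y[0] - yr[0]        # displacement estimate (dx, dy)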
|
mit
|
TomAugspurger/pandas
|
pandas/tests/series/test_apply.py
|
1
|
30121
|
from collections import Counter, defaultdict
from itertools import chain
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, isna
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
def test_apply(self, datetime_series):
with np.errstate(all="ignore"):
tm.assert_series_equal(
datetime_series.apply(np.sqrt), np.sqrt(datetime_series)
)
# element-wise apply
import math
tm.assert_series_equal(
datetime_series.apply(math.exp), np.exp(datetime_series)
)
# empty series
s = Series(dtype=object, name="foo", index=pd.Index([], name="bar"))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3], dtype=np.float64)
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
def f(x):
return (x, x + 1)
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
def f(x):
return x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self, datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
def test_apply_args(self):
s = Series(["foo,bar"])
result = s.apply(str.split, args=(",",))
assert result[0] == ["foo", "bar"]
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
# GH#2689, GH#2627
ser = Series(pd.date_range("1/1/2000", periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = pd.Series(values, name="XX")
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = pd.Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(
np.random.randn(10, 3),
columns=["A", "B", "C"],
index=pd.date_range("1/1/2000", periods=10),
)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
tsdf.A.agg({"foo": ["sum", "mean"]})
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
ser = pd.Series(values, name="XX", index=list("abcdefg"))
result = ser.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = pd.Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = ser.apply(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
@pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
def test_apply_categorical_with_nan_values(self, series):
# GH 20714 bug fixed in: GH 24275
s = pd.Series(series, dtype="category")
result = s.apply(lambda x: x.split("-")[0])
result = result.astype(object)
expected = pd.Series(["1", "1", np.NaN], dtype="category")
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
def test_apply_empty_integer_series_with_datetime_index(self):
# GH 21245
s = pd.Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
result = s.apply(lambda x: x)
tm.assert_series_equal(result, s)
class TestSeriesAggregate:
def test_transform(self, string_series):
# transforming functions
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
# ufunc
result = string_series.transform(np.sqrt)
expected = f_sqrt.copy()
tm.assert_series_equal(result, expected)
result = string_series.apply(np.sqrt)
tm.assert_series_equal(result, expected)
# list-like
result = string_series.transform([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
result = string_series.transform([np.sqrt])
tm.assert_frame_equal(result, expected)
result = string_series.transform(["sqrt"])
tm.assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["sqrt", "absolute"]
result = string_series.apply([np.sqrt, np.abs])
tm.assert_frame_equal(result, expected)
result = string_series.transform(["sqrt", "abs"])
expected.columns = ["sqrt", "abs"]
tm.assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["foo", "bar"]
expected = expected.unstack().rename("series")
result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self, string_series):
# we are trying to transform with an aggregator
msg = "transforms cannot produce aggregated results"
with pytest.raises(ValueError, match=msg):
string_series.transform(["min", "max"])
msg = "cannot combine transform and aggregation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg(["sqrt", "max"])
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.transform(["sqrt", "max"])
msg = "cannot perform both aggregation and transformation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg({"foo": np.sqrt, "bar": "sum"})
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype="int64", name="series")
result = s.agg(["min", "max"])
expected = Series([0, 5], index=["min", "max"], name="series")
tm.assert_series_equal(result, expected)
result = s.agg({"foo": "min"})
expected = Series([0], index=["foo"], name="series")
tm.assert_series_equal(result, expected)
# nested renaming
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"]})
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype="int64", name="series")
# nested renaming
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]})
def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self, datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
result = datetime_series.apply(
lambda x: Series([x, x ** 2], index=["x", "x^2"])
)
expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self, string_series):
# this also tests a result set that is all scalars
expected = string_series.describe()
result = string_series.apply(
{
"count": "count",
"mean": "mean",
"std": "std",
"min": "min",
"25%": lambda x: x.quantile(0.25),
"50%": "median",
"75%": lambda x: x.quantile(0.75),
"max": "max",
}
)
tm.assert_series_equal(result, expected)
def test_reduce(self, string_series):
# reductions with named functions
result = string_series.agg(["sum", "mean"])
expected = Series(
[string_series.sum(), string_series.mean()],
["sum", "mean"],
name=string_series.name,
)
tm.assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg("size")
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(["size", "count", "mean"])
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
tm.assert_series_equal(result[expected.index], expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", "c"), # see GH12863
("any", "a"),
],
),
),
)
def test_agg_cython_table(self, series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if tm.is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform(self, series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series("a b c".split()),
[
("mean", TypeError), # mean raises TypeError
("prod", TypeError),
("std", TypeError),
("var", TypeError),
("median", TypeError),
("cumprod", TypeError),
],
)
),
)
def test_agg_cython_table_raises(self, series, func, expected):
# GH21224
with pytest.raises(expected):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
def test_transform_none_to_type(self):
# GH34377
df = pd.DataFrame({"a": [None]})
msg = "DataFrame constructor called with incompatible data and dtype"
with pytest.raises(TypeError, match=msg):
df.transform({"a": int})
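# --- Editor's note: hedged illustration, not an original pandas test ---
# The aggregation cases above hinge on one distinction: ``transform`` must
# return output of the same length as its input, while ``agg`` reduces.  A
# minimal sketch (underscore-prefixed so pytest does not collect it):
def _example_transform_vs_agg():
    s = Series([1.0, 4.0, 9.0])
    same_length = s.transform(np.sqrt)  # element-wise, still length 3
    reduced = s.agg("sum")              # a single scalar, 14.0
    return same_length, reduced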
class TestSeriesMap:
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
merged = target.map(source)
for k, v in merged.items():
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in merged.items():
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
dtype="category",
index=pd.CategoricalIndex(["b", "c", "d", "e"]),
)
c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))
exp = Series(
pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
)
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, "B", "C", "D"])
tm.assert_series_equal(a.map(c), exp)
def test_map_empty(self, indices):
if isinstance(indices, MultiIndex):
pytest.skip("Initializing a Series from a MultiIndex is not supported")
s = Series(indices)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: "foo", False: "bar"})
expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_int(self):
left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged["d"])
assert not isna(merged["c"])
def test_map_type_inference(self):
s = Series(range(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action="ignore")
exp = s * 2
tm.assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = pd.DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
df["expected_labels"] = pd.Series(["A", "B", "A", "B"], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
def test_map_counter(self):
s = Series(["a", "b", "c"], index=[1, 2, 3])
counter = Counter()
counter["b"] = 5
counter["c"] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = s.map(default_dict)
expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_map_dict_na_key(self):
# https://github.com/pandas-dev/pandas/issues/17648
# Checks that np.nan key is appropriately mapped
s = Series([1, 2, np.nan])
expected = Series(["a", "b", "c"])
result = s.map({1: "a", 2: "b", np.nan: "c"})
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: "three"})
result = s.map(dictionary)
expected = Series(["missing", "missing", "three"])
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping(self, non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
s = Series([1, 2, 3])
not_a_dictionary = non_dict_mapping_subclass({3: "three"})
result = s.map(not_a_dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
class NonDictMappingWithMissing(non_dict_mapping_subclass):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
not_a_dictionary = NonDictMappingWithMissing({3: "three"})
result = s.map(not_a_dictionary)
# __missing__ is a dict concept, not a Mapping concept,
# so it should not change the result!
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = pd.Series(values, name="XX", index=list("abcdefg"))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(
list("abbabcd"), categories=list("dcba"), ordered=True
)
exp = pd.Series(exp_values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action="ignore")
def test_map_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = pd.Series(values, name="XX")
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = pd.Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action="ignore")
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"vals,mapping,exp",
[
(list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
(list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3),
],
)
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
s = pd.Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, pd.Series(exp))
@pytest.mark.parametrize(
"dti,exp",
[
(
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
),
(
tm.makeTimeSeries(nper=30),
DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
),
],
)
def test_apply_series_on_date_time_index_aware_series(self, dti, exp):
# GH 25959
# Calling apply on a localized time series should not cause an error
index = dti.tz_localize("UTC").index
result = pd.Series(index).apply(lambda x: pd.Series([1, 2]))
tm.assert_frame_equal(result, exp)
def test_apply_scaler_on_date_time_index_aware_series(self):
# GH 25959
# Calling apply on a localized time series should not cause an error
series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
result = pd.Series(series.index).apply(lambda x: 1)
tm.assert_series_equal(result, pd.Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
# GH 13228
ser = pd.Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_map_with_invalid_na_action_raises(self):
# https://github.com/pandas-dev/pandas/issues/32815
s = pd.Series([1, 2, 3])
msg = "na_action must either be 'ignore' or None"
with pytest.raises(ValueError, match=msg):
s.map(lambda x: x, na_action="____")
def test_apply_to_timedelta(self):
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# FIXME: dont leave commented-out
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
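# --- Editor's note: hedged illustration, not an original pandas test ---
# A compact recap of the behaviours exercised in this module: ``apply`` and
# ``map`` act element-wise, an unmatched dict key under ``map`` yields NaN,
# and ``agg`` with a list of reducers returns one entry per function.
def _example_apply_map_agg():
    s = Series([1.0, 2.0, 3.0])
    doubled = s.apply(lambda x: x * 2)    # element-wise, like s.map(...)
    mapped = s.map({1.0: "a", 2.0: "b"})  # third element becomes NaN
    reduced = s.agg(["min", "max"])       # Series indexed by ["min", "max"]
    return doubled, mapped, reduced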
|
bsd-3-clause
|
ankurankan/scikit-learn
|
sklearn/cluster/spectral.py
|
15
|
17944
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # Count the restart in the failure branch so the outer loop can
                # give up after max_svd_restarts failed SVD attempts.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
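# Editor's note: hedged usage sketch (not part of the original module).
# discretize expects an (n_samples, n_clusters) spectral embedding; the
# never-called helper below builds a tiny synthetic embedding with two
# well-separated groups just to illustrate the call signature.
def _demo_discretize():
    rng = np.random.RandomState(0)
    embedding = np.vstack([rng.normal(loc=(1., 0.), scale=.05, size=(5, 2)),
                           rng.normal(loc=(0., 1.), scale=.05, size=(5, 2))])
    # Returns one integer label per row of the embedding.
    return discretize(embedding, random_state=rng)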
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
    n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
    if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
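# Editor's note: hedged usage sketch (not part of the original module).
# spectral_clustering takes a symmetric, non-negative affinity matrix.  Here
# an RBF affinity is built from two well-separated blobs; the helper is
# illustrative and never called.
def _demo_spectral_clustering():
    from ..metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(10, 2) * .1,
                   rng.randn(10, 2) * .1 + 5.])
    affinity = rbf_kernel(X, gamma=1.)
    return spectral_clustering(affinity, n_clusters=2, random_state=0)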
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          " now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
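# Editor's note: hedged usage sketch (not part of the original module).
# As the class Notes describe, a distance matrix can be turned into an
# affinity with the heat (RBF) kernel and passed in with
# ``affinity='precomputed'``.  ``delta`` is a user-chosen bandwidth; the
# helper below is illustrative and never called.
def _demo_precomputed_affinity():
    from ..metrics.pairwise import euclidean_distances
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 4.])
    distances = euclidean_distances(X)
    delta = distances.mean()
    affinity = np.exp(-distances ** 2 / (2. * delta ** 2))
    model = SpectralClustering(n_clusters=2, affinity='precomputed',
                               random_state=0)
    return model.fit(affinity).labels_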
|
bsd-3-clause
|
numenta-archive/htmresearch
|
projects/union_pooling/experiments/union_sdr_continuous/union_pooling_trained_tm.py
|
8
|
9042
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import sys
import time
import os
import yaml
from optparse import OptionParser
import numpy
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.frameworks.union_temporal_pooling.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
_SHOW_PROGRESS_INTERVAL = 200
"""
Experiment 1
Runs UnionTemporalPooler on input from a Temporal Memory after training
on a long sequence
"""
def experiment1():
paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
outputDir = 'results/'
params = yaml.safe_load(open(paramDir, 'r'))
options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
plotVerbosity = 2
consoleVerbosity = 1
print "Running SDR overlap experiment...\n"
print "Params dir: {0}".format(paramDir)
print "Output dir: {0}\n".format(outputDir)
# Dimensionality of sequence patterns
patternDimensionality = params["patternDimensionality"]
# Cardinality (ON / true bits) of sequence patterns
patternCardinality = params["patternCardinality"]
# TODO If this parameter is to be supported, the sequence generation code
# below must change
# Number of unique patterns from which sequences are built
# patternAlphabetSize = params["patternAlphabetSize"]
# Length of sequences shown to network
sequenceLength = params["sequenceLength"]
# Number of sequences used. Sequences may share common elements.
numberOfSequences = params["numberOfSequences"]
# Number of sequence passes for training the TM. Zero => no training.
trainingPasses = params["trainingPasses"]
tmParamOverrides = params["temporalMemoryParams"]
upParamOverrides = params["unionPoolerParams"]
# Generate a sequence list and an associated labeled list (both containing a
# set of sequences separated by None)
start = time.time()
print "\nGenerating sequences..."
patternAlphabetSize = sequenceLength * numberOfSequences
patternMachine = PatternMachine(patternDimensionality, patternCardinality,
patternAlphabetSize)
sequenceMachine = SequenceMachine(patternMachine)
numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
for i in xrange(numberOfSequences)]
labeledSequences = []
for label in sequenceLabels:
for _ in xrange(sequenceLength):
labeledSequences.append(label)
labeledSequences.append(None)
# Set up the Temporal Memory and Union Pooler network
print "\nCreating network..."
experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)
# Train only the Temporal Memory on the generated sequences
if trainingPasses > 0:
print "\nTraining Temporal Memory..."
if consoleVerbosity > 0:
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
for i in xrange(trainingPasses):
experiment.runNetworkOnSequences(generatedSequences,
labeledSequences,
tmLearn=True,
upLearn=None,
verbosity=consoleVerbosity,
progressInterval=_SHOW_PROGRESS_INTERVAL)
if consoleVerbosity > 0:
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
# Reset the TM monitor mixin's records accrued during this training pass
# experiment.tm.mmClearHistory()
print
print MonitorMixinBase.mmPrettyPrintMetrics(
experiment.tm.mmGetDefaultMetrics())
print
experiment.tm.mmClearHistory()
experiment.up.mmClearHistory()
print "\nRunning test phase..."
inputSequences = generatedSequences
inputCategories = labeledSequences
tmLearn = True
upLearn = False
classifierLearn = False
currentTime = time.time()
experiment.tm.reset()
experiment.up.reset()
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))
for _ in xrange(trainingPasses):
experiment.tm.reset()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
if upLearn is not None:
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
currentPoolingActivation = experiment.up._poolingActivation
currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
currentUnionSDR[experiment.up._unionSDR] = 1
activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
currentSPSDR[experiment.up._activeCells] = 1
activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
print
print MonitorMixinBase.mmPrettyPrintMetrics(\
experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
print
experiment.tm.mmClearHistory()
# estimate fraction of shared bits across adjacent time point
unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
bitLifeList = experiment.up._mmComputeBitLifeStats()
bitLife = numpy.array(bitLifeList)
# Plot SP outputs, UP persistence and UP outputs in testing phase
def showSequenceStartLine(ax, trainingPasses, sequenceLength):
for i in xrange(trainingPasses):
ax.vlines(i*sequenceLength, 0, 100, linestyles='--')
plt.figure()
ncolShow = 100
f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
plt.title('Union SDR')
ax2.set_xlabel('Time (steps)')
pp = PdfPages('results/UnionPoolingOnLearnedTM_Experiment1.pdf')
pp.savefig()
pp.close()
f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1)
ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100)
ax1.set_ylabel('Union SDR size (%)')
ax1.set_xlabel('Time (steps)')
ax1.set_ylim(0,25)
ax2.plot(unionSDRshared)
ax2.set_ylabel('Shared Bits')
ax2.set_xlabel('Time (steps)')
ax3.hist(bitLife)
ax3.set_xlabel('Life duration for each bit')
pp = PdfPages('results/UnionSDRproperty_Experiment1.pdf')
pp.savefig()
pp.close()
if __name__ == "__main__":
experiment1()
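# Editor's note: hedged illustration (not part of the original experiment).
# generateNumbers() above returns a flat list in which each sequence of
# sequenceLength entries is followed by a None separator, so sequence i spans
# indices i*(sequenceLength+1) .. i*(sequenceLength+1)+sequenceLength.  The
# slice used when building sequenceLabels, numbers[i + i*L : i + (i+1)*L], is
# the same range written differently; this never-called check makes that
# arithmetic explicit.
def _checkLabelSlicing(sequenceLength=3, numberOfSequences=2):
  L = sequenceLength
  flat = range(numberOfSequences * (L + 1))  # stand-in for the flat list
  for i in xrange(numberOfSequences):
    assert flat[i + i*L: i + (i+1)*L] == flat[i*(L+1): i*(L+1) + L]
  return True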
|
agpl-3.0
|
mfjb/scikit-learn
|
sklearn/tree/export.py
|
53
|
15772
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
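# Editor's note: hedged usage sketch (not part of the original module).
# _color_brew returns integer (R, G, B) triples; converting one to the hex
# string style used in the DOT output is a one-liner.  Never-called helper,
# names are illustrative only.
def _demo_color_brew():
    palette = _color_brew(3)            # three evenly spaced hues
    r, g, b = palette[0]
    return '#%02x%02x%02x' % (r, g, b)  # e.g. a node fill colour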
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _tree.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
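# A minimal usage sketch for the exporter defined above, assuming a fitted
# scikit-learn decision tree; the file name "tree.dot" is illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    # Write a Graphviz DOT description of the fitted tree.
    export_graphviz(clf, out_file="tree.dot", filled=True, rounded=True,
                    feature_names=iris.feature_names,
                    class_names=list(iris.target_names))
    # The DOT file can then be rendered with the Graphviz command line tool:
    #   dot -Tpng tree.dot -o tree.png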
|
bsd-3-clause
|
vortex-ape/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
7
|
31748
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from itertools import product
import pytest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import ConvergenceWarning
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.datasets import make_multilabel_classification
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
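# Both filters are applied to X so that each test body below can be run once
# on a dense array and once on a CSR sparse matrix (see check_dense_sparse).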
@pytest.mark.parametrize('solver',
("svd", "sparse_cg", "cholesky", "lsqr", "sag"))
def test_ridge(solver):
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_regression_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
def test_ridge_regression_convergence_fail():
rng = np.random.RandomState(0)
y = rng.randn(5)
X = rng.randn(5, 10)
assert_warns(ConvergenceWarning, ridge_regression,
X, y, alpha=1.0, solver="sparse_cg",
tol=0., max_iter=None, verbose=1)
def test_ridge_sample_weights():
# TODO: loop over sparse data as well
# Note: parametrizing this test with pytest results in failed
    # assertions, meaning that it is not extremely robust
rng = np.random.RandomState(0)
param_grid = product((1.0, 1e-2), (True, False),
('svd', 'cholesky', 'lsqr', 'sparse_cg'))
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for (alpha, intercept, solver) in param_grid:
# Ridge with explicit sample_weight
est = Ridge(alpha=alpha, fit_intercept=intercept,
solver=solver, tol=1e-6)
est.fit(X, y, sample_weight=sample_weight)
coefs = est.coef_
inter = est.intercept_
# Closed form of the weighted regularized least square
# theta = (X^T W X + alpha I)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
I = np.eye(n_features)
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
I = np.eye(n_features + 1)
I[0, 0] = 0
cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs, cf_coefs)
else:
assert_array_almost_equal(coefs, cf_coefs[1:])
assert_almost_equal(inter, cf_coefs[0])
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
reg = Ridge(alpha=0.0)
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(reg.predict(X_test), [1., 2, 3, 4])
assert_equal(len(reg.coef_.shape), 1)
assert_equal(type(reg.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(reg.coef_.shape), 2)
assert_equal(type(reg.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag', 'saga']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
fit_intercept = filter_ == DENSE_FILTER
if fit_intercept:
X_diabetes_ = X_diabetes - X_diabetes.mean(0)
else:
X_diabetes_ = X_diabetes
ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept)
ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept)
    # (X was centered above because fit_intercept is applied)
# generalized cross-validation (efficient leave-one-out)
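    # For ridge, the leave-one-out residuals can be obtained from a single fit
    # through the identity e_i = (y_i - y_hat_i) / (1 - H_ii), with H the hat
    # matrix, which is what _RidgeGCV exploits instead of refitting n times.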
decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes, fit_intercept)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes_[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes_[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes_, y_diabetes, fit_intercept)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('neg_mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv_normalize(filter_):
ridge_cv = RidgeCV(normalize=True, cv=3)
ridge_cv.fit(filter_(10. * X_diabetes), y_diabetes)
gs = GridSearchCV(Ridge(normalize=True), cv=3,
param_grid={'alpha': ridge_cv.alphas})
gs.fit(filter_(10. * X_diabetes), y_diabetes)
assert_equal(gs.best_estimator_.alpha, ridge_cv.alpha_)
def _test_ridge_cv(filter_):
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for reg in (RidgeClassifier(), RidgeClassifierCV()):
reg.fit(filter_(X_iris), y_iris)
assert_equal(reg.coef_.shape, (n_classes, n_features))
y_pred = reg.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
cv = KFold(5)
reg = RidgeClassifierCV(cv=cv)
reg.fit(filter_(X_iris), y_iris)
y_pred = reg.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def check_dense_sparse(test_func):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
@pytest.mark.parametrize(
'test_func',
(_test_ridge_loo, _test_ridge_cv, _test_ridge_cv_normalize,
_test_ridge_diabetes, _test_multi_ridge_diabetes,
_test_ridge_classifiers, _test_tolerance))
def test_dense_sparse(test_func):
check_dense_sparse(test_func)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
reg = RidgeClassifier(class_weight={1: 0.001})
reg.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
reg = RidgeClassifier(class_weight='balanced')
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
rega = RidgeClassifier(class_weight='balanced')
rega.fit(X, y)
assert_equal(len(rega.classes_), 2)
assert_array_almost_equal(reg.coef_, rega.coef_)
assert_array_almost_equal(reg.intercept_, rega.intercept_)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
@pytest.mark.parametrize('reg', (RidgeClassifier, RidgeClassifierCV))
def test_class_weight_vs_sample_weight(reg):
"""Check class_weights resemble sample_weights behavior."""
# Iris is balanced, so no effect expected for using 'balanced' weights
reg1 = reg()
reg1.fit(iris.data, iris.target)
reg2 = reg(class_weight='balanced')
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Check that sample_weight and class_weight are multiplicative
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight ** 2)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(reg1.coef_, reg2.coef_)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
reg.fit(X, y)
# we give a small weights to class 1
reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
reg.fit(X, y)
assert_array_equal(reg.predict([[-.2, 2]]), np.array([-1]))
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_ridgecv_store_cv_values():
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, cv=None, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert r.cv_values_.shape == (n_samples, n_alphas)
# with len(y.shape) == 2
n_targets = 3
y = rng.randn(n_samples, n_targets)
r.fit(x, y)
assert r.cv_values_.shape == (n_samples, n_targets, n_alphas)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_ridge_classifier_cv_store_cv_values():
x = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
n_samples = x.shape[0]
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeClassifierCV(alphas=alphas, cv=None, store_cv_values=True)
# with len(y.shape) == 1
n_targets = 1
r.fit(x, y)
assert r.cv_values_.shape == (n_samples, n_targets, n_alphas)
# with len(y.shape) == 2
y = np.array([[1, 1, 1, -1, -1],
[1, -1, 1, -1, 1],
[-1, -1, 1, -1, -1]]).transpose()
n_targets = y.shape[1]
r.fit(x, y)
assert r.cv_values_.shape == (n_samples, n_targets, n_alphas)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
cv = KFold(5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
gs = GridSearchCV(Ridge(), parameters, cv=cv)
gs.fit(X, y, sample_weight=sample_weight)
assert ridgecv.alpha_ == gs.best_estimator_.alpha
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_ridgecv_int_alphas():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
# Integers
ridge = RidgeCV(alphas=(1, 10, 100))
ridge.fit(X, y)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_ridgecv_negative_alphas():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
# Negative integers
ridge = RidgeCV(alphas=(-1, -10, -100))
assert_raises_regex(ValueError,
"alphas cannot be negative.",
ridge.fit, X, y)
# Negative floats
ridge = RidgeCV(alphas=(-0.1, -1.0, -10.0))
assert_raises_regex(ValueError,
"alphas cannot be negative.",
ridge.fit, X, y)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'saga', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
for solver in ['saga', 'sag']:
dense = Ridge(alpha=1., tol=1.e-15, solver=solver, fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver=solver, fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
def test_errors_and_values_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
n = 5
y = rng.randn(n)
v = rng.randn(n)
Q = rng.randn(len(v), len(v))
QT_y = Q.T.dot(y)
G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
np.testing.assert_array_equal(c, c)
out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_errors_and_values_svd_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
for n, p in zip((5, 10), (12, 6)):
y = rng.randn(n)
v = rng.randn(p)
U = rng.randn(n, p)
UT_y = U.T.dot(y)
G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
np.testing.assert_array_equal(c, c)
out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_ridge_classifier_no_support_multilabel():
X, y = make_multilabel_classification(n_samples=10, random_state=0)
assert_raises(ValueError, RidgeClassifier().fit, X, y)
def test_dtype_match():
rng = np.random.RandomState(0)
alpha = 1.0
n_samples, n_features = 6, 5
X_64 = rng.randn(n_samples, n_features)
y_64 = rng.randn(n_samples)
X_32 = X_64.astype(np.float32)
y_32 = y_64.astype(np.float32)
solvers = ["svd", "sparse_cg", "cholesky", "lsqr"]
for solver in solvers:
# Check type consistency 32bits
ridge_32 = Ridge(alpha=alpha, solver=solver)
ridge_32.fit(X_32, y_32)
coef_32 = ridge_32.coef_
# Check type consistency 64 bits
ridge_64 = Ridge(alpha=alpha, solver=solver)
ridge_64.fit(X_64, y_64)
coef_64 = ridge_64.coef_
# Do the actual checks at once for easier debug
assert coef_32.dtype == X_32.dtype
assert coef_64.dtype == X_64.dtype
assert ridge_32.predict(X_32).dtype == X_32.dtype
assert ridge_64.predict(X_64).dtype == X_64.dtype
assert_almost_equal(ridge_32.coef_, ridge_64.coef_, decimal=5)
def test_dtype_match_cholesky():
# Test different alphas in cholesky solver to ensure full coverage.
# This test is separated from test_dtype_match for clarity.
rng = np.random.RandomState(0)
alpha = (1.0, 0.5)
n_samples, n_features, n_target = 6, 7, 2
X_64 = rng.randn(n_samples, n_features)
y_64 = rng.randn(n_samples, n_target)
X_32 = X_64.astype(np.float32)
y_32 = y_64.astype(np.float32)
# Check type consistency 32bits
ridge_32 = Ridge(alpha=alpha, solver='cholesky')
ridge_32.fit(X_32, y_32)
coef_32 = ridge_32.coef_
# Check type consistency 64 bits
ridge_64 = Ridge(alpha=alpha, solver='cholesky')
ridge_64.fit(X_64, y_64)
coef_64 = ridge_64.coef_
# Do all the checks at once, like this is easier to debug
assert coef_32.dtype == X_32.dtype
assert coef_64.dtype == X_64.dtype
assert ridge_32.predict(X_32).dtype == X_32.dtype
assert ridge_64.predict(X_64).dtype == X_64.dtype
assert_almost_equal(ridge_32.coef_, ridge_64.coef_, decimal=5)
|
bsd-3-clause
|
ankurankan/scikit-learn
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
mugurbil/gnm
|
examples/exp_time_series/acor_plot.py
|
1
|
3652
|
# -*- coding: utf-8 -*-
'''
Plot the results
'''
# import
import json
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from pylab import *
import os
import gnm
# command line options to set parameters
from optparse import OptionParser
parser = OptionParser()
# experiment number
parser.add_option('-c', dest='count', type='int',
default=0, help='count of experiment')
# for plotting
parser.add_option('-n', dest='num_bins', type='int',
default=100, help='number of bins')
parser.add_option('-l', dest='xmin', type='float',
default=-5., help='lower bound of range')
parser.add_option('-u', dest='xmax', type='float',
default=5., help='upper bound of range')
parser.add_option('-k', dest='k', type='int',
default=None, help='the dimension of plot')
parser.add_option('--all', action='store_true', dest='all',
default='False', help='plot all dimensions')
parser.add_option('--theory', action='store_true', dest='theo',
default='False', help='theoretical curve')
parser.add_option('--two', action='store_true', dest='two',
default='False', help='2d histogram')
(opts, args) = parser.parse_args()
# set the parameters
xmin = opts.xmin
xmax = opts.xmax
plot_range = [xmin,xmax]
num_bins = opts.num_bins
# load the samples
folder = 'acor_data_%d/' % opts.count
path = os.path.join(folder, 'chain')
file = open(path, 'r')
chain = json.load(file)
chain = np.array(chain)
n = len(chain)
N = len(chain[0])//2
file.close()
path = os.path.join(folder, 'stats')
file = open(path, 'r')
stats = json.load(file)
file.close()
import acor
tau = np.zeros(2*N)
for i in range(2*N):
tau[i] = acor.acor(chain[:,i],5)[0]
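# tau[i] is the integrated autocorrelation time of coordinate i of the chain;
# a chain of length n therefore holds only about n/tau[i] effectively
# independent samples, which is why the error bars below are widened with tau.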
print(tau)
if opts.all==True :
k_list = range(2*N)
elif opts.k != None :
k_list = [opts.k]
else :
k_list = []
for k in k_list:
# histogram of samples
plt_smpld = plt.hist(chain[:,k],num_bins,color = 'b',
range=plot_range,normed=True,label='sampled',alpha=0.3)
if opts.theo==True :
# theoretical curve
path = os.path.join(folder, 'curve'+str(k))
data_file = open(path, 'r')
theoCurve = json.load(data_file)
data_file.close()
x_space = np.linspace(xmin,xmax,num=len(theoCurve))
plt_theo = plt.plot(x_space,theoCurve,color = 'r', linewidth =1,
label='quadrature')
# error bars
z, fhat, epsf = gnm.error_bars(k,chain,num_bins,plot_range)
epsf = epsf*sqrt(sqrt(tau[k])) # adjust the error with autocorrelation time
plt_err = plt.plot(z,fhat,color = 'b',marker = 's',
linewidth = 0,alpha=0.0)
plt.errorbar(z,fhat,yerr = epsf,fmt = 'k.')
# plot labels
plt.title('Histogram (n={:.0e}, P(A)={:.3})'.format(n,stats['accept_rate']))
if k < N :
x_label = '$w_%d$' % (k)
else :
x_label = '$\lambda_%d$' % (k-N)
plt.xlabel( x_label )
plt.ylabel('Probability')
plt.legend()
path = os.path.join(folder, 'acor_'+x_label.strip('\$')+'.pdf')
plt.savefig(path, dpi = 500)
plt.clf()
if opts.two==True :
    for k in range(N):
# 2d histogram
plt.hist2d(chain[:,k], chain[:,k+N], bins=2*num_bins, normed=True)
colorbar()
plt.title('2D Histogram (n={:.0e},P(A)={:.3})'.format(n,stats['accept_rate']))
plt.xlabel('Weight ($w_%d$)' % k)
plt.ylabel('Exponent ($\lambda_%d$)' % k)
path = os.path.join(folder, 'acor_2d_%d.pdf' % k)
plt.savefig(path, dpi = 500)
plt.clf()
|
mit
|
Deiz/naev
|
utils/heatsim/heatsim.py
|
20
|
7285
|
#!/usr/bin/env python
#######################################################
#
# SIM CODE
#
#######################################################
# Imports
from frange import *
import math
import matplotlib.pyplot as plt
def clamp( a, b, x ):
return min( b, max( a, x ) )
class heatsim:
def __init__( self, shipname = "llama", weapname = "laser", simulation = [ 60., 120. ] ):
# Sim parameters
self.STEFAN_BOLZMANN = 5.67e-8
self.SPACE_TEMP = 250.
self.STEEL_COND = 54.
self.STEEL_CAP = 0.49
self.STEEL_DENS = 7.88e3
self.ACCURACY_LIMIT = 500
self.FIRERATE_LIMIT = 800
self.shipname = shipname
self.weapname = weapname
# Sim info
self.sim_dt = 1./50. # Delta tick
self.setSimulation( simulation )
# Load some data
self.ship_mass, self.ship_weaps = self.loadship( shipname )
self.weap_mass, self.weap_delay, self.weap_energy = self.loadweap( weapname )
def setSimulation( self, simulation ):
self.simulation = simulation
self.sim_total = simulation[-1]
def loadship( self, shipname ):
"Returns mass, number of weaps."
if shipname == "llama":
return 80., 2
elif shipname == "lancelot":
return 180., 4
elif shipname == "pacifier":
return 730., 5
elif shipname == "hawking":
return 3750., 7
elif shipname == "peacemaker":
return 6200., 8
else:
raise ValueError
def loadweap( self, weapname ):
"Returns mass, delay, energy."
if weapname == "laser":
return 2., 0.9, 4.25
elif weapname == "plasma":
return 4., 0.675, 3.75
elif weapname == "ion":
return 6., 1.440, 15.
elif weapname == "laser turret":
return 16., 0.540, 6.12
elif weapname == "ion turret":
return 42., 0.765, 25.
elif weapname == "railgun turret":
return 60., 1.102, 66.
else:
raise ValueError
def prepare( self ):
# Time stuff
self.time_data = []
# Calculate ship parameters
ship_kg = self.ship_mass * 1000.
self.ship_emis = 0.8
self.ship_cond = self.STEEL_COND
self.ship_C = self.STEEL_CAP * ship_kg
#self.ship_area = pow( ship_kg / self.STEEL_DENS, 2./3. )
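        # Surface area of a sphere holding the ship's mass at steel density:
        # A = 4*pi*r^2 with r = (3*m / (4*pi*rho))^(1/3)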
self.ship_area = 4.*math.pi*pow( 3./4.*ship_kg/self.STEEL_DENS/math.pi, 2./3. )
self.ship_T = self.SPACE_TEMP
self.ship_data = []
# Calculate weapon parameters
weap_kg = self.weap_mass * 1000.
self.weap_C = self.STEEL_CAP * weap_kg
#self.weap_area = pow( weap_kg / self.STEEL_DENS, 2./3. )
self.weap_area = 2.*math.pi*pow( 3./4.*weap_kg/self.STEEL_DENS/math.pi, 2./3. )
self.weap_list = []
self.weap_T = []
self.weap_data = []
for i in range(self.ship_weaps):
self.weap_list.append( i*self.weap_delay / self.ship_weaps )
self.weap_T.append( self.SPACE_TEMP )
self.weap_data.append( [] )
def __accMod( self, T ):
return clamp( 0., 1., (T-500.)/600. )
def __frMod( self, T ):
return clamp( 0., 1., (1100.-T)/300. )
def simulate( self ):
"Begins the simulation."
# Prepare it
self.prepare()
# Run simulation
weap_on = True
sim_index = 0
dt = self.sim_dt
sim_elapsed = 0.
while sim_elapsed < self.sim_total:
Q_cond = 0.
# Check weapons
for i in range(len(self.weap_list)):
# Check if we should start/stop shooting
if self.simulation[ sim_index ] < sim_elapsed:
weap_on = not weap_on
sim_index += 1
# Check if shot
if weap_on:
self.weap_list[i] -= dt * self.__frMod( self.weap_T[i] )
if self.weap_list[i] < 0.:
self.weap_T[i] += 1e4 * self.weap_energy / self.weap_C
self.weap_list[i] += self.weap_delay
# Do heat movement (conduction)
Q = -self.ship_cond * (self.weap_T[i] - self.ship_T) * self.weap_area * dt
self.weap_T[i] += Q / self.weap_C
Q_cond += Q
self.weap_data[i].append( self.weap_T[i] )
# Do ship heat (radiation)
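            # Stefan-Boltzmann law: P = sigma * A * emissivity * (T_space^4 - T_ship^4),
            # multiplied by dt to get the energy exchanged in this step.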
Q_rad = self.STEFAN_BOLZMANN * self.ship_area * self.ship_emis * (pow(self.SPACE_TEMP,4.) - pow(self.ship_T,4.)) * dt
Q = Q_rad - Q_cond
self.ship_T += Q / self.ship_C
self.time_data.append( sim_elapsed )
self.ship_data.append( self.ship_T )
# Elapsed time
            sim_elapsed += dt
def save( self, filename ):
"Saves the results to a file."
        f = open( filename, 'w' )
        for i in range(len(self.time_data)):
            f.write( str(self.time_data[i])+' '+str(self.ship_data[i]) )
            for j in range(len(self.weap_data)):
                f.write( ' '+str(self.weap_data[j][i]) )
            f.write( '\n' )
f.close()
def display( self ):
print("Ship Temp: "+str(hs.ship_T)+" K")
for i in range(len(hs.weap_list)):
print("Outfit["+str(i)+"] Temp: "+str(hs.weap_T[i])+" K")
def plot( self, filename=None ):
plt.hold(False)
plt.figure(1)
# Plot 1 Data
plt.subplot(211)
plt.plot( self.time_data, self.ship_data, '-' )
# Plot 1 Info
plt.axis( [0, self.sim_total, 0, 1100] )
plt.title( 'NAEV Heat Simulation ('+self.shipname+' with '+self.weapname+')' )
plt.legend( ('Ship', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper left')
plt.ylabel( 'Temperature [K]' )
plt.grid( True )
# Plot 1 Data
plt.subplot(212)
plt.plot( self.time_data, self.weap_data[0], '-' )
plt.hold(True)
plt_data = []
for i in range(len(self.weap_data[0])):
plt_data.append( self.ACCURACY_LIMIT )
plt.plot( self.time_data, plt_data, '--' )
plt_data = []
for i in range(len(self.weap_data[0])):
plt_data.append( self.FIRERATE_LIMIT )
plt.plot( self.time_data, plt_data, '-.' )
plt.hold(False)
# Plot 2 Info
plt.axis( [0, self.sim_total, 0, 1100] )
plt.legend( ('Weapon', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper right')
plt.ylabel( 'Temperature [K]' )
plt.xlabel( 'Time [s]' )
plt.grid( True )
if filename == None:
plt.show()
else:
plt.savefig( filename )
if __name__ == "__main__":
print("NAEV HeatSim\n")
shp_lst = { 'llama' : 'laser',
'lancelot' : 'ion',
'pacifier' : 'laser turret',
'hawking' : 'ion turret',
'peacemaker' : 'railgun turret' }
for shp,wpn in shp_lst.items():
hs = heatsim( shp, wpn, (60., 120.) )
#hs = heatsim( shp, wpn, frange( 30., 600., 30. ) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_60_60.png' )
hs.setSimulation( (30., 90.) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_30_60.png' )
hs.setSimulation( (30., 90., 120., 180.) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_30_60_30_60.png' )
print( ' '+shp+' with '+wpn+' done!' )
|
gpl-3.0
|
zodiacnan/Masterarbeit
|
moduls/results/excelwrite.py
|
1
|
3091
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 15 12:55:07 2017
@author: DINGNAN
"""
#write the excel table of Ld-Lq identifikation
import os
os.chdir('C:\\Users\\DINGNAN\\Desktop\\machines\\res\\')
import xlwt
import numpy as np
import matplotlib
from datetime import date,datetime
import pandas as pd
import math
NbrPolePairs = 4.0 #NbrPolePairs = 8
speed = 500.0 #rad/s
fact = speed*360/60
Iseff = 10.9
beta = np.pi/8
Id = []
Iq = []
Is = []
deg = []
Beta = []
Fd = []
Fq = []
Tr = []
Ts = []
Ta = []
T_sim = []
deg_r = []
file_tr = "Tr.dat"
file_ts = "Ts.dat"
file_fd = "Flux_d.dat"
file_fq = "Flux_q.dat"
with open(file_fd, "r") as fig:
count = 0
for line in fig:
count+=1
if count%2 == 0:
data = line.split()
Fd.append((float(data[1])))
else:
pass
with open(file_fq, "r") as fig:
count = 0
for line in fig:
count+=1
if count%2 == 0:
data = line.split()
Fq.append((float(data[1])))
else:
pass
with open(file_ts, "r") as fig:
for line in fig:
data = line.split()
Ts.append((float(data[1])))
with open(file_tr, "r") as fig:
for line in fig:
data = line.split()
Tr.append((float(data[1])))
with open(file_tr, "r") as fig:
for line in fig:
data = line.split()
deg.append((float(fact)*float(data[0])))
deg_r = np.around(deg).astype(int)
def set_style(name,height,bold = True):
style = xlwt.XFStyle()
font = xlwt.Font()
font.name = name
font.bold = bold
font.colour_index = 4
font.height = height
style.font = font
return style
def write_excel():
f = xlwt.Workbook()
col_width = 256*15
sheet_1 = f.add_sheet('LdandLq' , cell_overwrite_ok = True)
row_0 = ['Is_eff','Theta[Last]','Id[A]','Iq[A]','Flux_d[Vs]','Flux_q[Vs]',
'Torque_Air[Nm]', 'T_Simulation[Nm]']
Is = [float(Iseff)]*len(deg_r)
Id_i = Iseff*np.sin(beta)/np.sin(np.pi-np.pi/NbrPolePairs)
Id = [Id_i]*len(deg_r)
Iq_i = Iseff*np.sin(np.pi/NbrPolePairs-beta)/np.sin(np.pi-np.pi/NbrPolePairs)
Iq = [Iq_i]*len(deg_r)
Beta = [beta]*len(deg_r)
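    # T_sim below applies the dq-frame torque equation
    # T = 1.5 * p * (psi_d * i_q - psi_q * i_d); Ta averages the two simulated
    # torque traces read from Tr.dat and Ts.dat.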
    Ta = [(tr + ts) / 2 for tr, ts in zip(Tr, Ts)]
    T_sim = [1.5 * NbrPolePairs * (fd * iq - fq * id_)
             for fd, fq, id_, iq in zip(Fd, Fq, Id, Iq)]
for i in range(0,len(row_0)):
sheet_1.col(i).width = col_width
sheet_1.write(0,i,row_0[i],set_style('Time New Roman',220,True))
for j in range(0,len(deg_r)):
sheet_1.write(j+1,0,Is[j])
sheet_1.write(j+1,1,Beta[j])
sheet_1.write(j+1,2,Id[j])
sheet_1.write(j+1,3,Iq[j])
sheet_1.write(j+1,4,Fd[j])
sheet_1.write(j+1,5,Fq[j])
sheet_1.write(j+1,6,Ta[j])
sheet_1.write(j+1,7,T_sim[j])
name = 'LdandLq'+str(int(Iseff))+str(int(beta))+'pm'
f.save(name+'.xls')
df_excel = pd.ExcelFile(name+'.xls')
df = df_excel.parse('LdandLq') # give summary sheet name
df.to_html('LdandLq.html')
print(1)
if __name__ == '__main__':
write_excel()
|
gpl-3.0
|
karpathy/arxiv-sanity-preserver
|
buildsvm.py
|
1
|
2210
|
# standard imports
import os
import sys
import pickle
# non-standard imports
import numpy as np
from sklearn import svm
from sqlite3 import dbapi2 as sqlite3
# local imports
from utils import safe_pickle_dump, strip_version, Config
num_recommendations = 1000 # papers to recommend per user
# -----------------------------------------------------------------------------
if not os.path.isfile(Config.database_path):
print("the database file as.db should exist. You can create an empty database with sqlite3 as.db < schema.sql")
sys.exit()
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = sqldb.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
# -----------------------------------------------------------------------------
# fetch all users
users = query_db('''select * from user''')
print('number of users: ', len(users))
# load the tfidf matrix and meta
meta = pickle.load(open(Config.meta_path, 'rb'))
out = pickle.load(open(Config.tfidf_path, 'rb'))
X = out['X']
X = X.todense().astype(np.float32)
xtoi = { strip_version(x):i for x,i in meta['ptoi'].items() }
user_sim = {}
for ii,u in enumerate(users):
print("%d/%d building an SVM for %s" % (ii, len(users), u['username'].encode('utf-8')))
uid = u['user_id']
lib = query_db('''select * from library where user_id = ?''', [uid])
pids = [x['paper_id'] for x in lib] # raw pids without version
posix = [xtoi[p] for p in pids if p in xtoi]
if not posix:
continue # empty library for this user maybe?
print(pids)
y = np.zeros(X.shape[0])
for ix in posix: y[ix] = 1
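  # One linear SVM per user: the papers in the user's library form the positive
  # class and all other papers the negative class; ranking every paper by the
  # resulting decision_function margin gives the recommendation list.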
clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000, tol=1e-6, C=0.1)
clf.fit(X,y)
s = clf.decision_function(X)
sortix = np.argsort(-s)
sortix = sortix[:min(num_recommendations, len(sortix))] # crop paper recommendations to save space
user_sim[uid] = [strip_version(meta['pids'][ix]) for ix in list(sortix)]
print('writing', Config.user_sim_path)
safe_pickle_dump(user_sim, Config.user_sim_path)
|
mit
|
deeplook/bokeh
|
examples/plotting/file/elements.py
|
43
|
1485
|
import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata import periodic_table
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = list(reversed([
"#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))
melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]
output_file("elements.html", title="elements.py example")
TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"
p = figure(tools=TOOLS, toolbar_location="left", logo="grey", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill= "#cccccc"
p.circle(elements["atomic mass"], elements["density"], size=12,
color=meltingpointcolors, line_color="black", fill_alpha=0.8)
p.text(elements["atomic mass"], elements["density"]+0.3,
text=elements["symbol"],text_color="#333333",
text_align="center", text_font_size="10pt")
p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"
show(p)
|
bsd-3-clause
|
tobiasgehring/qudi
|
logic/counter_logic.py
|
1
|
26620
|
# -*- coding: utf-8 -*-
"""
This file contains the Qudi counter logic class.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
from collections import OrderedDict
import numpy as np
import time
import matplotlib.pyplot as plt
from logic.generic_logic import GenericLogic
from interface.slow_counter_interface import CountingMode
from core.util.mutex import Mutex
class CounterLogic(GenericLogic):
""" This logic module gathers data from a hardware counting device.
@signal sigCounterUpdate: there is new counting data available
@signal sigCountContinuousNext: used to simulate a loop in which the data
acquisition runs.
    @signal sigCountGatedNext: ???
@return error: 0 is OK, -1 is error
"""
sigCounterUpdated = QtCore.Signal()
sigCountDataNext = QtCore.Signal()
sigGatedCounterFinished = QtCore.Signal()
sigGatedCounterContinue = QtCore.Signal(bool)
sigCountingSamplesChanged = QtCore.Signal(int)
sigCountLengthChanged = QtCore.Signal(int)
sigCountFrequencyChanged = QtCore.Signal(float)
sigSavingStatusChanged = QtCore.Signal(bool)
sigCountStatusChanged = QtCore.Signal(bool)
sigCountingModeChanged = QtCore.Signal(CountingMode)
_modclass = 'CounterLogic'
_modtype = 'logic'
## declare connectors
_connectors = {
'counter1': 'SlowCounterInterface',
'savelogic': 'SaveLogic'}
def __init__(self, config, **kwargs):
""" Create CounterLogic object with connectors.
@param dict config: module configuration
@param dict kwargs: optional parameters
"""
super().__init__(config=config, **kwargs)
#locking for thread safety
self.threadlock = Mutex()
self.log.info('The following configuration was found.')
# checking for the right configuration
for key in config.keys():
self.log.info('{0}: {1}'.format(key, config[key]))
# in bins
self._count_length = 300
self._smooth_window_length = 10
self._counting_samples = 1 # oversampling
# in hertz
self._count_frequency = 50
# self._binned_counting = True # UNUSED?
self._counting_mode = CountingMode['CONTINUOUS']
self._saving = False
return
def on_activate(self):
""" Initialisation performed during activation of the module.
"""
# Connect to hardware and save logic
self._counting_device = self.get_connector('counter1')
self._save_logic = self.get_connector('savelogic')
# Recall saved app-parameters
if 'count_length' in self._statusVariables:
self._count_length = self._statusVariables['count_length']
if 'smooth_window_length' in self._statusVariables:
self._smooth_window_length = self._statusVariables['smooth_window_length']
if 'counting_samples' in self._statusVariables:
self._counting_samples = self._statusVariables['counting_samples']
if 'count_frequency' in self._statusVariables:
self._count_frequency = self._statusVariables['count_frequency']
if 'counting_mode' in self._statusVariables:
self._counting_mode = CountingMode[self._statusVariables['counting_mode']]
if 'saving' in self._statusVariables:
self._saving = self._statusVariables['saving']
constraints = self.get_hardware_constraints()
number_of_detectors = constraints.max_detectors
# initialize data arrays
self.countdata = np.zeros([len(self.get_channels()), self._count_length])
self.countdata_smoothed = np.zeros([len(self.get_channels()), self._count_length])
self.rawdata = np.zeros([len(self.get_channels()), self._counting_samples])
self._already_counted_samples = 0 # For gated counting
self._data_to_save = []
# Flag to stop the loop
self.stopRequested = False
self._saving_start_time = time.time()
# connect signals
self.sigCountDataNext.connect(self.count_loop_body, QtCore.Qt.QueuedConnection)
return
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
"""
# Save parameters to disk
self._statusVariables['count_length'] = self._count_length
self._statusVariables['smooth_window_length'] = self._smooth_window_length
self._statusVariables['counting_samples'] = self._counting_samples
self._statusVariables['count_frequency'] = self._count_frequency
self._statusVariables['counting_mode'] = self._counting_mode.name
self._statusVariables['saving'] = self._saving
# Stop measurement
if self.getState() == 'locked':
self._stopCount_wait()
self.sigCountDataNext.disconnect()
return
def get_hardware_constraints(self):
"""
Retrieve the hardware constrains from the counter device.
@return SlowCounterConstraints: object with constraints for the counter
"""
return self._counting_device.get_constraints()
def set_counting_samples(self, samples=1):
"""
Sets the length of the counted bins.
The counter is stopped first and restarted afterwards.
@param int samples: oversampling in units of bins (positive int ).
@return int: oversampling in units of bins.
"""
# Determine if the counter has to be restarted after setting the parameter
if self.getState() == 'locked':
restart = True
else:
restart = False
if samples > 0:
self._stopCount_wait()
self._counting_samples = int(samples)
# if the counter was running, restart it
if restart:
self.startCount()
else:
self.log.warning('counting_samples has to be larger than 0! Command ignored!')
self.sigCountingSamplesChanged.emit(self._counting_samples)
return self._counting_samples
def set_count_length(self, length=300):
""" Sets the time trace in units of bins.
@param int length: time trace in units of bins (positive int).
@return int: length of time trace in units of bins
This makes sure, the counter is stopped first and restarted afterwards.
"""
if self.getState() == 'locked':
restart = True
else:
restart = False
if length > 0:
self._stopCount_wait()
self._count_length = int(length)
# if the counter was running, restart it
if restart:
self.startCount()
else:
self.log.warning('count_length has to be larger than 0! Command ignored!')
self.sigCountLengthChanged.emit(self._count_length)
return self._count_length
def set_count_frequency(self, frequency=50):
""" Sets the frequency with which the data is acquired.
@param float frequency: the desired frequency of counting in Hz
@return float: the actual frequency of counting in Hz
This makes sure, the counter is stopped first and restarted afterwards.
"""
constraints = self.get_hardware_constraints()
if self.getState() == 'locked':
restart = True
else:
restart = False
if constraints.min_count_frequency <= frequency <= constraints.max_count_frequency:
self._stopCount_wait()
self._count_frequency = frequency
# if the counter was running, restart it
if restart:
self.startCount()
else:
self.log.warning('count_frequency not in range! Command ignored!')
self.sigCountFrequencyChanged.emit(self._count_frequency)
return self._count_frequency
def get_count_length(self):
""" Returns the currently set length of the counting array.
@return int: count_length
"""
return self._count_length
#FIXME: get from hardware
def get_count_frequency(self):
""" Returns the currently set frequency of counting (resolution).
@return float: count_frequency
"""
return self._count_frequency
def get_counting_samples(self):
""" Returns the currently set number of samples counted per readout.
@return int: counting_samples
"""
return self._counting_samples
def get_saving_state(self):
""" Returns if the data is saved in the moment.
@return bool: saving state
"""
return self._saving
def start_saving(self, resume=False):
"""
Sets up start-time and initializes data array, if not resuming, and changes saving state.
If the counter is not running it will be started in order to have data to save.
@return bool: saving state
"""
if not resume:
self._data_to_save = []
self._saving_start_time = time.time()
self._saving = True
# If the counter is not running, then it should start running so there is data to save
if self.getState() != 'locked':
self.startCount()
self.sigSavingStatusChanged.emit(self._saving)
return self._saving
def save_data(self, to_file=True, postfix=''):
""" Save the counter trace data and writes it to a file.
@param bool to_file: indicate, whether data have to be saved to file
@param str postfix: an additional tag, which will be added to the filename upon save
@return dict parameters: Dictionary which contains the saving parameters
"""
# stop saving, i.e. set the saving state to False
self._saving = False
self._saving_stop_time = time.time()
# write the parameters:
parameters = OrderedDict()
parameters['Start counting time'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._saving_start_time))
parameters['Stop counting time'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._saving_stop_time))
parameters['Count frequency (Hz)'] = self._count_frequency
parameters['Oversampling (Samples)'] = self._counting_samples
parameters['Smooth Window Length (# of events)'] = self._smooth_window_length
if to_file:
# If there is a postfix then add separating underscore
if postfix == '':
filelabel = 'count_trace'
else:
filelabel = 'count_trace_' + postfix
# prepare the data in a dict or in an OrderedDict:
header = 'Time (s)'
for i, detector in enumerate(self.get_channels()):
header = header + ',Signal{0} (counts/s)'.format(i)
data = {header: self._data_to_save}
filepath = self._save_logic.get_path_for_module(module_name='Counter')
fig = self.draw_figure(data=np.array(self._data_to_save))
self._save_logic.save_data(data, filepath=filepath, parameters=parameters,
filelabel=filelabel, plotfig=fig, delimiter='\t')
self.log.info('Counter Trace saved to:\n{0}'.format(filepath))
self.sigSavingStatusChanged.emit(self._saving)
return self._data_to_save, parameters
def draw_figure(self, data):
""" Draw figure to save with data file.
@param: nparray data: a numpy array containing counts vs time for all detectors
@return: fig fig: a matplotlib figure object to be saved to file.
"""
count_data = data[:, 1:len(self.get_channels())+1]
time_data = data[:, 0]
# Scale count values using SI prefix
prefix = ['', 'k', 'M', 'G']
prefix_index = 0
while np.max(count_data) > 1000:
count_data = count_data / 1000
prefix_index = prefix_index + 1
counts_prefix = prefix[prefix_index]
# Use qudi style
plt.style.use(self._save_logic.mpl_qd_style)
# Create figure
fig, ax = plt.subplots()
ax.plot(time_data, count_data, linestyle=':', linewidth=0.5)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Fluorescence (' + counts_prefix + 'c/s)')
return fig
def set_counting_mode(self, mode='CONTINUOUS'):
"""Set the counting mode, to change between continuous and gated counting.
Possible options are:
'CONTINUOUS' = counts continuously
'GATED' = bins the counts according to a gate signal
'FINITE_GATED' = finite measurement with predefined number of samples
@return str: counting mode
"""
constraints = self.get_hardware_constraints()
if self.getState() != 'locked':
if CountingMode[mode] in constraints.counting_mode:
self._counting_mode = CountingMode[mode]
self.log.debug('New counting mode: {}'.format(self._counting_mode))
else:
self.log.warning('Counting mode not supported from hardware. Command ignored!')
self.sigCountingModeChanged.emit(self._counting_mode)
else:
self.log.error('Cannot change counting mode while counter is still running.')
return self._counting_mode
def get_counting_mode(self):
""" Retrieve the current counting mode.
@return str: one of the possible counting options:
'CONTINUOUS' = counts continuously
'GATED' = bins the counts according to a gate signal
'FINITE_GATED' = finite measurement with predefined number of samples
"""
return self._counting_mode
# FIXME: Not implemented for self._counting_mode == 'gated'
def startCount(self):
""" This is called externally, and is basically a wrapper that
redirects to the chosen counting mode start function.
@return error: 0 is OK, -1 is error
"""
# Sanity checks
constraints = self.get_hardware_constraints()
if self._counting_mode not in constraints.counting_mode:
self.log.error('Unknown counting mode "{0}". Cannot start the counter.'
''.format(self._counting_mode))
self.sigCountStatusChanged.emit(False)
return -1
with self.threadlock:
# Lock module
if self.getState() != 'locked':
self.lock()
else:
self.log.warning('Counter already running. Method call ignored.')
return 0
# Set up clock
clock_status = self._counting_device.set_up_clock(clock_frequency=self._count_frequency)
if clock_status < 0:
self.unlock()
self.sigCountStatusChanged.emit(False)
return -1
# Set up counter
if self._counting_mode == CountingMode['FINITE_GATED']:
counter_status = self._counting_device.set_up_counter(counter_buffer=self._count_length)
# elif self._counting_mode == CountingMode['GATED']:
#
else:
counter_status = self._counting_device.set_up_counter()
if counter_status < 0:
self._counting_device.close_clock()
self.unlock()
self.sigCountStatusChanged.emit(False)
return -1
# initialising the data arrays
self.rawdata = np.zeros([len(self.get_channels()), self._counting_samples])
self.countdata = np.zeros([len(self.get_channels()), self._count_length])
self.countdata_smoothed = np.zeros([len(self.get_channels()), self._count_length])
self._sampling_data = np.empty([len(self.get_channels()), self._counting_samples])
# the sample index for gated counting
self._already_counted_samples = 0
# Start data reader loop
self.sigCountStatusChanged.emit(True)
self.sigCountDataNext.emit()
return
def stopCount(self):
""" Set a flag to request stopping counting.
"""
if self.getState() == 'locked':
with self.threadlock:
self.stopRequested = True
return
def count_loop_body(self):
""" This method gets the count data from the hardware for the continuous counting mode (default).
It runs repeatedly in the logic module event loop by being connected
to sigCountDataNext and emitting sigCountDataNext through a queued connection.
"""
if self.getState() == 'locked':
with self.threadlock:
# check for a requested stop of the thread and break out if necessary
if self.stopRequested:
# close off the actual counter
cnt_err = self._counting_device.close_counter()
clk_err = self._counting_device.close_clock()
if cnt_err < 0 or clk_err < 0:
self.log.error('Could not even close the hardware, giving up.')
# switch the state variable off again
self.stopRequested = False
self.unlock()
self.sigCounterUpdated.emit()
return
# read the current counter value
self.rawdata = self._counting_device.get_counter(samples=self._counting_samples)
if self.rawdata[0, 0] < 0:
self.log.error('The counting went wrong, killing the counter.')
self.stopRequested = True
else:
if self._counting_mode == CountingMode['CONTINUOUS']:
self._process_data_continous()
elif self._counting_mode == CountingMode['GATED']:
self._process_data_gated()
elif self._counting_mode == CountingMode['FINITE_GATED']:
self._process_data_finite_gated()
else:
self.log.error('No valid counting mode set! Can not process counter data.')
# call this again from event loop
self.sigCounterUpdated.emit()
self.sigCountDataNext.emit()
return
def save_current_count_trace(self, name_tag=''):
""" The currently displayed counttrace will be saved.
@param str name_tag: optional, personal description that will be
appended to the file name
@return: dict data: Data which was saved
str filepath: Filepath
dict parameters: Experiment parameters
str filelabel: Filelabel
This method saves the already displayed counts to file and does not
accumulate them. The counttrace variable will be saved to file with the
provided name!
"""
# If there is a postfix then add separating underscore
if name_tag == '':
filelabel = 'snapshot_count_trace'
else:
filelabel = 'snapshot_count_trace_' + name_tag
stop_time = self._count_length / self._count_frequency
time_step_size = stop_time / len(self.countdata)
x_axis = np.arange(0, stop_time, time_step_size)
# prepare the data in a dict or in an OrderedDict:
data = OrderedDict()
chans = self.get_channels()
savearr = np.empty((len(chans) + 1, len(x_axis)))
savearr[0] = x_axis
datastr = 'Time (s)'
for i, ch in enumerate(chans):
savearr[i+1] = self.countdata[i]
datastr += ',Signal {0} (counts/s)'.format(i)
data[datastr] = savearr.transpose()
# write the parameters:
parameters = OrderedDict()
timestr = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(time.time()))
parameters['Saved at time'] = timestr
parameters['Count frequency (Hz)'] = self._count_frequency
parameters['Oversampling (Samples)'] = self._counting_samples
parameters['Smooth Window Length (# of events)'] = self._smooth_window_length
filepath = self._save_logic.get_path_for_module(module_name='Counter')
self._save_logic.save_data(data, filepath=filepath, parameters=parameters,
filelabel=filelabel, delimiter='\t')
self.log.debug('Current Counter Trace saved to: {0}'.format(filepath))
return data, filepath, parameters, filelabel
def get_channels(self):
""" Shortcut for hardware get_counter_channels.
@return list(str): return list of active counter channel names
"""
return self._counting_device.get_counter_channels()
def _process_data_continous(self):
"""
Processes the raw data from the counting device
@return:
"""
for i, ch in enumerate(self.get_channels()):
# remember the new count data in circular array
self.countdata[i, 0] = np.average(self.rawdata[i])
# move the array to the left to make space for the new data
self.countdata = np.roll(self.countdata, -1, axis=1)
# also move the smoothing array
self.countdata_smoothed = np.roll(self.countdata_smoothed, -1, axis=1)
# calculate the median and save it
window = -int(self._smooth_window_length / 2) - 1
for i, ch in enumerate(self.get_channels()):
self.countdata_smoothed[i, window:] = np.median(self.countdata[i,
-self._smooth_window_length:])
# save the data if necessary
if self._saving:
# if oversampling is necessary
if self._counting_samples > 1:
chans = self.get_channels()
self._sampling_data = np.empty([len(chans) + 1, self._counting_samples])
self._sampling_data[0, :] = time.time() - self._saving_start_time
for i, ch in enumerate(chans):
self._sampling_data[i+1, 0] = self.rawdata[i]
self._data_to_save.extend(list(self._sampling_data))
# if we don't want to use oversampling
else:
# append tuple to data stream (timestamp, average counts)
chans = self.get_channels()
newdata = np.empty((len(chans) + 1, ))
newdata[0] = time.time() - self._saving_start_time
for i, ch in enumerate(chans):
newdata[i+1] = self.countdata[i, -1]
self._data_to_save.append(newdata)
return
def _process_data_gated(self):
"""
Processes the raw data from the counting device
@return:
"""
# remember the new count data in circular array
self.countdata[0] = np.average(self.rawdata[0])
# move the array to the left to make space for the new data
self.countdata = np.roll(self.countdata, -1)
# also move the smoothing array
self.countdata_smoothed = np.roll(self.countdata_smoothed, -1)
# calculate the median and save it
self.countdata_smoothed[-int(self._smooth_window_length / 2) - 1:] = np.median(
self.countdata[-self._smooth_window_length:])
# save the data if necessary
if self._saving:
# if oversampling is necessary
if self._counting_samples > 1:
self._sampling_data = np.empty((self._counting_samples, 2))
self._sampling_data[:, 0] = time.time() - self._saving_start_time
self._sampling_data[:, 1] = self.rawdata[0]
self._data_to_save.extend(list(self._sampling_data))
# if we don't want to use oversampling
else:
# append tuple to data stream (timestamp, average counts)
self._data_to_save.append(np.array((time.time() - self._saving_start_time,
self.countdata[-1])))
return
def _process_data_finite_gated(self):
"""
Processes the raw data from the counting device
@return:
"""
if self._already_counted_samples+len(self.rawdata[0]) >= len(self.countdata):
needed_counts = len(self.countdata) - self._already_counted_samples
self.countdata[0:needed_counts] = self.rawdata[0][0:needed_counts]
self.countdata = np.roll(self.countdata, -needed_counts)
self._already_counted_samples = 0
self.stopRequested = True
else:
# replace the first part of the array with the new data:
self.countdata[0:len(self.rawdata[0])] = self.rawdata[0]
# roll the array by the amount of data it had been inserted:
self.countdata = np.roll(self.countdata, -len(self.rawdata[0]))
# increment the index counter:
self._already_counted_samples += len(self.rawdata[0])
return
def _stopCount_wait(self, timeout=5.0):
"""
Stops the counter and waits until it actually has stopped.
@param float timeout: maximum time in seconds to wait for the counting
process to stop.
@return: error code
"""
self.stopCount()
start_time = time.time()
while self.getState() == 'locked':
time.sleep(0.1)
if time.time() - start_time >= timeout:
self.log.error('Stopping the counter timed out after {0}s'.format(timeout))
return -1
return 0
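# --- Illustrative sketch (not part of the original qudi module) ---
# A minimal, standalone version of the per-sample update used by the
# _process_data_* methods above: write the newest averaged sample into the
# circular trace, rotate, and median-smooth the tail of the trace. All names
# below are hypothetical and only serve this example.
def _count_trace_update_sketch(countdata, countdata_smoothed, raw_samples, smooth_window_length):
    """Append one averaged sample to a rolling count trace and re-smooth its tail."""
    import numpy as np
    countdata[0] = np.average(raw_samples)            # newest value goes into slot 0 ...
    countdata = np.roll(countdata, -1)                # ... and is rotated to the end of the trace
    countdata_smoothed = np.roll(countdata_smoothed, -1)
    window = -int(smooth_window_length / 2) - 1
    countdata_smoothed[window:] = np.median(countdata[-smooth_window_length:])
    return countdata, countdata_smoothed
# Example call (hypothetical values):
#   trace, smoothed = _count_trace_update_sketch(np.zeros(300), np.zeros(300), [120, 118, 121], 10)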
|
gpl-3.0
|
rmkoesterer/uga
|
uga/RunSnvgroupplot.py
|
1
|
27368
|
## Copyright (c) 2015 Ryan Koesterer GNU General Public License v3
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
import scipy.stats as scipy
from uga import Parse
import pysam
import math
from uga import Process
import readline
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
import logging
import re
pandas2ri.activate()
logging.basicConfig(format='%(asctime)s - %(processName)s - %(name)s - %(message)s',level=logging.DEBUG)
logger = logging.getLogger("RunSnvgroupplot")
def RunSnvgroupplot(args):
cfg = Parse.generate_snvgroupplot_cfg(args)
Parse.print_snvgroupplot_options(cfg)
if not cfg['debug']:
logging.disable(logging.CRITICAL)
ro.r('suppressMessages(library(ggplot2))')
ro.r('suppressMessages(library(grid))')
handle=pysam.TabixFile(filename=cfg['file'],parser=pysam.asVCF())
header = [x for x in handle.header]
skip_rows = len(header)-1
cols = header[-1].split()
pcols = cfg['pcol'].split(',')
if cfg['qq_strat']:
if cfg['cmaccol'] not in cols:
cols_extract = ['#chr','start','end','id'] + pcols
print(Process.Error("minor allele count column " + cfg['cmaccol'] + " not found, unable to proceed with minor allele count stratified plots").out)
return 1
else:
cols_extract = ['#chr','start','end','id',cfg['cmaccol']] + pcols
print("minor allele count column " + cfg['cmaccol'] + " found")
else:
cols_extract = ['#chr','start','end','id'] + pcols
print("importing data")
r = pd.read_table(cfg['file'],sep='\t',skiprows=skip_rows,usecols=cols_extract,compression='gzip')
print(str(r.shape[0]) + " total groups found")
for pcol in pcols:
print("plotting p-values for column " + pcol + " ...")
results = r[['#chr','start','end','id',cfg['cmaccol'],pcol]] if cfg['cmaccol'] in cols else r[['#chr','start','end','id',pcol]]
results.dropna(inplace=True)
results = results[(results[pcol] > 0) & (results[pcol] <= 1)]
results.reset_index(drop=True, inplace=True)
print(" " + str(results.shape[0]) + " groups with plottable p-values")
if results.shape[0] > 1:
results['logp'] = -1 * np.log10(results[pcol]) + 0.0
results['pos'] = results.start + (results.end - results.start) / 2
ro.globalenv['results'] = results
l = np.median(scipy.chi2.ppf([1-x for x in results[pcol].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
# in R: median(qchisq(results$p, df=1, lower.tail=FALSE))/qchisq(0.5,1); see the standalone sketch after this function
print(" genomic inflation (all groups) = " + str(l))
if cfg['qq']:
print(" generating standard qq plot")
print(" minimum p-value: " + str(np.min(results[pcol])))
a = -1 * np.log10(ro.r('ppoints(' + str(len(results.index)) + ')'))
a.sort()
results.sort_values(by=['logp'], inplace=True)
print(" maximum -1*log10(p-value): " + str(np.max(results['logp'])))
ci_upper = -1 * np.log10(scipy.beta.ppf(0.95, list(range(1,len(results[pcol]) + 1)), list(range(len(results[pcol]),0,-1))))
ci_upper.sort()
ci_lower = -1 * np.log10(scipy.beta.ppf(0.05, list(range(1,len(results[pcol]) + 1)), list(range(len(results[pcol]),0,-1))))
ci_lower.sort()
ro.globalenv['df'] = ro.DataFrame({'a': ro.FloatVector(a), 'b': ro.FloatVector(results['logp']), 'ci_lower': ro.FloatVector(ci_lower), 'ci_upper': ro.FloatVector(ci_upper)})
dftext_label = 'lambda %~~% ' + str(l)
ro.globalenv['dftext'] = ro.DataFrame({'x': ro.r('Inf'), 'y': 0.5, 'lab': dftext_label})
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.tiff')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white",horizontal=True)' % (cfg['out'] + '.' + pcol + '.qq.eps')
else:
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.pdf')
ro.r("""
gp<-ggplot(df)
pp<-gp +
aes_string(x='a',y='b') +
geom_ribbon(aes_string(x='a',ymin='ci_lower',ymax='ci_upper'), data=df, alpha=0.25, fill='black') +
geom_point(size=2) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_continuous(expression(Expected~~-log[10](italic(p)))) +
scale_y_continuous(expression(Observed~~-log[10](italic(p)))) +
theme_bw(base_size = 12) +
geom_text(aes_string(x='x', y='y', label='lab'), data = dftext, colour="black", vjust=0, hjust=1, size = 4, parse=TRUE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.position = 'none',
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped standard qq plot")
ro.r('df$b[df$b > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$b == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.cropped.tiff')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white",horizontal=True)' % (cfg['out'] + '.' + pcol + '.qq.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.cropped.pdf')
ro.r("""
gp<-ggplot(df)
pp<-gp +
aes_string(x='a',y='b') +
geom_ribbon(aes_string(x='a',ymin='ci_lower',ymax='ci_upper'), data=df, alpha=0.25, fill='black') +
geom_point(aes(shape=factor(shape)),size=2) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_continuous(expression(Expected~~-log[10](italic(p)))) +
scale_y_continuous(expression(Observed~~-log[10](italic(p)))) +
theme_bw(base_size = 12) +
geom_text(aes_string(x='x', y='y', label='lab'), data = dftext, colour="black", vjust=0, hjust=1, size = 4, parse=TRUE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.position = 'none',
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if cfg['qq_strat']:
print(" generating frequency stratified qq plot")
results['CMAC'] = 'E'
results.loc[results[cfg['cmaccol']] >= 100,'CMAC'] = 'D'
results.loc[(results[cfg['cmaccol']] >= 50) & (results[cfg['cmaccol']] < 100),'CMAC'] = 'C'
results.loc[(results[cfg['cmaccol']] >= 20) & (results[cfg['cmaccol']] < 50),'CMAC'] = 'B'
results.loc[(results[cfg['cmaccol']] >= 10) & (results[cfg['cmaccol']] < 20),'CMAC'] = 'A'
lA='NA'
lB='NA'
lC='NA'
lD='NA'
lE='NA'
lE_n=len(results[pcol][results[cfg['cmaccol']] < 10])
lD_n=len(results[pcol][(results[cfg['cmaccol']] >= 10) & (results[cfg['cmaccol']] < 20)])
lC_n=len(results[pcol][(results[cfg['cmaccol']] >= 20) & (results[cfg['cmaccol']] < 50)])
lB_n=len(results[pcol][(results[cfg['cmaccol']] >= 50) & (results[cfg['cmaccol']] < 100)])
lA_n=len(results[pcol][results[cfg['cmaccol']] >= 100])
if lE_n > 0:
lE=np.median(scipy.chi2.ppf([1-x for x in results[pcol][results[cfg['cmaccol']] < 10].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
if lD_n > 0:
lD=np.median(scipy.chi2.ppf([1-x for x in results[pcol][(results[cfg['cmaccol']] >= 10) & (results[cfg['cmaccol']] < 20)].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
if lC_n > 0:
lC=np.median(scipy.chi2.ppf([1-x for x in results[pcol][(results[cfg['cmaccol']] >= 20) & (results[cfg['cmaccol']] < 50)].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
if lB_n > 0:
lB=np.median(scipy.chi2.ppf([1-x for x in results[pcol][(results[cfg['cmaccol']] >= 50) & (results[cfg['cmaccol']] < 100)].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
if lA_n > 0:
lA=np.median(scipy.chi2.ppf([1-x for x in results[pcol][results[cfg['cmaccol']] >= 100].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
print(" genomic inflation (CMAC > 100, n=" + str(lA_n) + ") = " + str(lA))
print(" genomic inflation (50 <= CMAC < 100, n=" + str(lB_n) + ") = " + str(lB))
print(" genomic inflation (20 <= CMAC < 50, n=" + str(lC_n) + ") = " + str(lC))
print(" genomic inflation (10 <= CMAC < 20, n=" + str(lD_n) + ") = " + str(lD))
print(" genomic inflation (CMAC < 10, n=" + str(lE_n) + ") = " + str(lE))
a = np.array([])
b = np.array([])
c = np.array([])
results.sort_values(by=['logp'], inplace=True)
if len(results[results['CMAC'] == 'E'].index) > 0:
aa = -1 * np.log10(ro.r('ppoints(' + str(len(results[results['CMAC'] == 'E'].index)) + ')'))
aa.sort()
bb = results['logp'][results['CMAC'] == 'E']
#bb.sort()
cc = results['CMAC'][results['CMAC'] == 'E']
a = np.append(a,aa)
b = np.append(b,bb)
c = np.append(c,cc)
print(" minimum p-value (CMAC < 10): " + str(np.min(results[pcol][results['CMAC'] == 'E'])))
print(" maximum -1*log10(p-value) (CMAC < 10): " + str(np.max(results['logp'][results['CMAC'] == 'E'])))
if len(results[results['CMAC'] == 'D'].index) > 0:
aa = -1 * np.log10(ro.r('ppoints(' + str(len(results[results['CMAC'] == 'D'].index)) + ')'))
aa.sort()
bb = results['logp'][results['CMAC'] == 'D']
#bb.sort()
cc = results['CMAC'][results['CMAC'] == 'D']
a = np.append(a,aa)
b = np.append(b,bb)
c = np.append(c,cc)
print(" minimum p-value (10 <= CMAC < 20): " + str(np.min(results[pcol][results['CMAC'] == 'D'])))
print(" maximum -1*log10(p-value) (10 <= CMAC < 20): " + str(np.max(results['logp'][results['CMAC'] == 'D'])))
if len(results[results['CMAC'] == 'C'].index) > 0:
aa = -1 * np.log10(ro.r('ppoints(' + str(len(results[results['CMAC'] == 'C'].index)) + ')'))
aa.sort()
bb = results['logp'][results['CMAC'] == 'C']
#bb.sort()
cc = results['CMAC'][results['CMAC'] == 'C']
a = np.append(a,aa)
b = np.append(b,bb)
c = np.append(c,cc)
print(" minimum p-value (20 <= CMAC < 50): " + str(np.min(results[pcol][results['CMAC'] == 'C'])))
print(" maximum -1*log10(p-value) (20 <= CMAC < 50): " + str(np.max(results['logp'][results['CMAC'] == 'C'])))
if len(results[results['CMAC'] == 'B'].index) > 0:
aa = -1 * np.log10(ro.r('ppoints(' + str(len(results[results['CMAC'] == 'B'].index)) + ')'))
aa.sort()
bb = results['logp'][results['CMAC'] == 'B']
#bb.sort()
cc = results['CMAC'][results['CMAC'] == 'B']
a = np.append(a,aa)
b = np.append(b,bb)
c = np.append(c,cc)
print(" minimum p-value (50 <= CMAC < 100): " + str(np.min(results[pcol][results['CMAC'] == 'B'])))
print(" maximum -1*log10(p-value) (50 <= CMAC < 100): " + str(np.max(results['logp'][results['CMAC'] == 'B'])))
if len(results[results['CMAC'] == 'A'].index) > 0:
aa = -1 * np.log10(ro.r('ppoints(' + str(len(results[results['CMAC'] == 'A'].index)) + ')'))
aa.sort()
bb = results['logp'][results['CMAC'] == 'A']
#bb.sort()
cc = results['CMAC'][results['CMAC'] == 'A']
a = np.append(a,aa)
b = np.append(b,bb)
c = np.append(c,cc)
print(" minimum p-value (CMAC >= 100): " + str(np.min(results[pcol][results['CMAC'] == 'A'])))
print(" maximum -1*log10(p-value) (CMAC >= 100): " + str(np.max(results['logp'][results['CMAC'] == 'A'])))
ro.globalenv['df'] = ro.DataFrame({'a': ro.FloatVector(a), 'b': ro.FloatVector(b), 'CMAC': ro.StrVector(c)})
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat.tiff')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white",horizontal=True)' % (cfg['out'] + '.' + pcol + '.qq_strat.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='a',y='b')) +
geom_point(aes_string(color='CMAC'), size=2) +
scale_colour_manual(values=c("E"="#a8ddb5", "D"="#7bccc4", "C"="#4eb3d3", "B"="#2b8cbe", "A"="#08589e"), labels=c("E"="CMAC < 10","D"="10 <= CMAC < 20","C"="20 <= CMAC < 50","B"="50 <= CMAC < 100","A"="CMAC >= 100")) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_continuous(expression(Expected~~-log[10](italic(p)))) +
scale_y_continuous(expression(Observed~~-log[10](italic(p)))) +
theme_bw(base_size = 12) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=5), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped frequency stratified qq plot")
ro.r('df$b[df$b > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$b == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat.cropped.tiff')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white",horizontal=True)' % (cfg['out'] + '.' + pcol + '.qq_strat.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat.cropped.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='a',y='b')) +
geom_point(aes(shape=factor(shape), color=CMAC), size=2) +
scale_colour_manual(values=c("E"="#a8ddb5", "D"="#7bccc4", "C"="#4eb3d3", "B"="#2b8cbe", "A"="#08589e"), labels=c("E"="CMAC < 10","D"="10 <= CMAC < 20","C"="20 <= CMAC < 50","B"="50 <= CMAC < 100","A"="CMAC >= 100")) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_continuous(expression(Expected~~-log[10](italic(p)))) +
scale_y_continuous(expression(Observed~~-log[10](italic(p)))) +
theme_bw(base_size = 12) +
guides(shape=FALSE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=5), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if cfg['mht']:
print(" generating standard manhattan plot")
print(" minimum p-value: " + str(np.min(results[pcol])))
print(" maximum -1*log10(p-value): " + str(np.max(results['logp'])))
if cfg['gc']:
print(" adjusting p-values for genomic inflation for p-value column " + pcol)
results[pcol]=2 * scipy.norm.cdf(-1 * np.abs(scipy.norm.ppf(0.5*results[pcol]) / math.sqrt(l)))
print(" minimum post-gc adjustment p-value: " + str(np.min(results[pcol])))
print(" maximum post-gc adjustment -1*log10(p-value): " + str(np.max(results['logp'])))
else:
print(" skipping genomic inflation correction")
print(" calculating genomic positions")
results.sort_values(by=['#chr','pos'], inplace=True)
ticks = []
lastbase = 0
results['gpos'] = 0
nchr = len(list(np.unique(results['#chr'].values)))
chrs = np.unique(results['#chr'].values)
if cfg['color']:
colours = ["#08306B","#41AB5D","#000000","#F16913","#3F007D","#EF3B2C","#08519C","#238B45","#252525","#D94801","#54278F","#CB181D","#2171B5","#006D2C","#525252","#A63603","#6A51A3","#A50F15","#4292C6","#00441B","#737373","#7F2704","#807DBA","#67000D"]
else:
colours = ["#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3"]
if nchr == 1:
results['gpos'] = results['pos']
results['colours'] = "#08589e"
span = results['gpos'].max() - results['gpos'].min()
if span <= 1000:
    tick_step = 100
elif span <= 10000:
    tick_step = 1000
elif span <= 100000:
    tick_step = 10000
elif span <= 1000000:
    # steps of 20kb, 30kb, ..., 100kb for spans between 100kb and 1Mb
    tick_step = 10000 * int(math.ceil(span / 100000.0))
elif span <= 10000000:
    tick_step = 1000000
elif span <= 100000000:
    tick_step = 10000000
else:
    tick_step = 25000000
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % tick_step == 0]
else:
results['colours'] = "#000000"
for i in range(len(chrs)):
print(" processed chromosome " + str(int(chrs[i])))
if i == 0:
results.loc[results['#chr'] == chrs[i],'gpos'] = results.loc[results['#chr'] == chrs[i],'pos']
else:
if results.loc[results['#chr'] == chrs[i-1]].shape[0] > 1:
lastbase = lastbase + results.loc[results['#chr'] == chrs[i-1],'pos'].iloc[-1]
else:
lastbase = lastbase + results.loc[results['#chr'] == chrs[i-1],'pos'].iloc[0]
results.loc[results['#chr'] == chrs[i],'gpos'] = (results.loc[results['#chr'] == chrs[i],'pos']) + lastbase
if results.loc[results['#chr'] == chrs[i]].shape[0] > 1:
ticks.append(results.loc[results['#chr'] == chrs[i],'gpos'].iloc[0] + (results.loc[results['#chr'] == chrs[i],'gpos'].iloc[-1] - results.loc[results['#chr'] == chrs[i],'gpos'].iloc[0])/2)
else:
ticks.append(results.loc[results['#chr'] == chrs[i],'gpos'].iloc[0])
results.loc[results['#chr'] == chrs[i],'colours'] = colours[int(chrs[i])]
results['logp'] = -1 * np.log10(results[pcol])
if results.shape[0] >= 1000000:
sig = 5.4e-8
else:
sig = 0.05 / results.shape[0]
print(" significance level set to p-value = " + str(sig) + " (-1*log10(p-value) = " + str(-1 * np.log10(sig)) + ")")
chr = results['#chr'][0]
maxy=int(max(np.ceil(-1 * np.log10(sig)),np.ceil(results['logp'].max())))
if maxy > 20:
y_breaks = list(range(0,maxy,5))
y_labels = list(range(0,maxy,5))
else:
y_breaks = list(range(0,maxy))
y_labels = list(range(0,maxy))
ro.globalenv['df'] = ro.DataFrame({'gpos': ro.FloatVector(results['gpos']), 'logp': ro.FloatVector(results['logp']), 'colours': ro.FactorVector(results['colours'])})
ro.globalenv['ticks'] = ro.FloatVector(ticks)
ro.globalenv['labels'] = ro.Vector(["{:,}".format(x/1000) for x in ticks])
ro.globalenv['colours'] = ro.StrVector(colours)
ro.globalenv['chrs'] = ro.FloatVector(chrs)
print(" generating manhattan plot")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=12,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.tiff')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=12,height=4,bg="white",horizontal=True)' % (cfg['out'] + '.' + pcol + '.mht.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=12,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.pdf')
if nchr == 1:
ro.r("""
gp<-ggplot(df, aes_string(x='gpos',y='logp')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(size=1.5) +
scale_x_continuous(expression(Chromosome~~%d~~(kb)),breaks=ticks,labels=labels) +
scale_y_continuous(expression(-log[10](italic(p))),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=8), legend.position = 'none', axis.text = element_text(size=12))
%s
""" % (sig, chr, maxy, ggsave))
else:
ro.r("""
gp = ggplot(df, aes_string(x='gpos',y='logp',colour='colours')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(size=1.5) +
scale_colour_manual(values=colours) +
scale_x_continuous(expression(Chromosome),breaks=ticks,labels=chrs) +
scale_y_continuous(expression(-log[10](italic(p))),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=8),
axis.text = element_text(size=6), legend.position = 'none', axis.text = element_text(size=12))
%s
""" % (sig, maxy, ggsave))
if maxy > cfg['crop']:
maxy = cfg['crop']
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
print(" generating cropped manhattan plot")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=12,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.cropped.tiff')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=12,height=4,bg="white",horizontal=True)' % (cfg['out'] + '.' + pcol + '.mht.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=12,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.cropped.pdf')
if nchr == 1:
ro.r("""
gp<-ggplot(df, aes_string(x='gpos',y='logp')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(aes(shape=factor(shape)),size=1.5) +
scale_x_continuous(expression(Chromosome~~%d~~(kb)),breaks=ticks,labels=labels) +
scale_y_continuous(expression(-log[10](italic(p))),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=8), legend.position = 'none', axis.text = element_text(size=12))
%s
""" % (sig, chr, maxy, ggsave))
else:
ro.r("""
gp = ggplot(df, aes_string(x='gpos',y='logp',colour='colours')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(aes(shape=factor(shape)),size=1.5) +
scale_colour_manual(values=colours) +
scale_x_continuous(expression(Chromosome),breaks=ticks,labels=chrs) +
scale_y_continuous(expression(-log[10](italic(p))),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=8),
axis.text = element_text(size=6), legend.position = 'none', axis.text = element_text(size=12))
%s
""" % (sig, maxy, ggsave))
print("process complete")
return 0
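# --- Illustrative sketch (not part of the original uga module) ---
# Standalone versions of two quantities computed above: the genomic inflation
# factor (median observed chi-square statistic over its expected median) and
# the expected -log10(p) quantiles used for the qq plots (a NumPy stand-in for
# R's ppoints()). Function names are hypothetical.
def _genomic_inflation_sketch(pvals):
    pvals = np.asarray(pvals, dtype=float)
    return np.median(scipy.chi2.ppf(1.0 - pvals, df=1)) / scipy.chi2.ppf(0.5, 1)

def _expected_neglog10_quantiles_sketch(n):
    i = np.arange(1, n + 1)
    a = 0.375 if n <= 10 else 0.5
    return -np.log10((i - a) / (n + 1.0 - 2.0 * a))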
|
gpl-3.0
|
vigilv/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
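# Illustrative sketch (not collected as a test): a compact, hedged example of
# the whitened mixing-model identity exercised above, i.e. the returned
# sources satisfy S.T ~= W K X for centered observations X. Names are local
# to this sketch.
def _fastica_unmixing_sketch():
    rng = np.random.RandomState(0)
    sources = rng.laplace(size=(2, 500))         # two roughly independent sources
    x = np.dot(rng.randn(2, 2), sources)         # mix them with a random matrix
    center_and_norm(x)                           # the identity below assumes centered data
    k, w, est = fastica(x.T, random_state=0)
    assert_array_almost_equal(est.T, np.dot(np.dot(w, k), x))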
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
econ-ark/PARK
|
source/SciPy2018/make-scipy-plots-GE.py
|
1
|
3906
|
import sys
import os
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../FashionVictim'))
sys.path.insert(0, os.path.abspath('./'))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from HARKutilities import plotFuncsDer, plotFuncs
from FashionVictimModel import *
from HARKcore import AgentType, Solution, NullFunc
from HARKinterpolation import LinearInterp
from HARKutilities import approxUniform, plotFuncs
import numpy as np
import scipy.stats as stats
import FashionVictimParams as Params
from copy import copy
from time import clock
from HARKcore import Market
mystr = lambda number : "{:.4f}".format(number)
import matplotlib.pyplot as plt
from copy import deepcopy
do_many_types = True
# Make a test case and solve the micro model
TestType = FashionVictimType(**Params.default_params)
print('Utility function:')
plotFuncs(TestType.conformUtilityFunc,0,1)
t_start = clock()
TestType.solve()
t_end = clock()
print('Solving a fashion victim micro model took ' + mystr(t_end-t_start) + ' seconds.')
'''
print('Jock value function:')
plotFuncs(TestType.VfuncJock,0,1)
print('Punk value function:')
plotFuncs(TestType.VfuncPunk,0,1)
print('Jock switch probability:')
plotFuncs(TestType.switchFuncJock,0,1)
print('Punk switch probability:')
plotFuncs(TestType.switchFuncPunk,0,1)
'''
# Make a list of different types
AltType = deepcopy(TestType)
AltType(uParamA = Params.uParamB, uParamB = Params.uParamA, seed=20)
AltType.update()
AltType.solve()
type_list = [TestType,AltType]
u_vec = np.linspace(0.02,0.1,5)
if do_many_types:
for j in range(u_vec.size):
ThisType = deepcopy(TestType)
ThisType(punk_utility=u_vec[j])
ThisType.solve()
type_list.append(ThisType)
ThisType = deepcopy(AltType)
ThisType(punk_utility=u_vec[j])
ThisType.solve()
type_list.append(ThisType)
for j in range(u_vec.size):
ThisType = deepcopy(TestType)
ThisType(jock_utility=u_vec[j])
ThisType.solve()
type_list.append(ThisType)
ThisType = deepcopy(AltType)
ThisType(jock_utility=u_vec[j])
ThisType.solve()
type_list.append(ThisType)
# Now run the simulation inside a Market
TestMarket = Market(agents = type_list,
sow_vars = ['pNow'],
reap_vars = ['sNow'],
track_vars = ['pNow'],
dyn_vars = ['pNextIntercept','pNextSlope','pNextWidth'],
millRule = calcPunkProp,
calcDynamics = calcFashionEvoFunc,
act_T = 1000,
tolerance = 0.01)
TestMarket.pNow_init = 0.5
TestMarket.solve()
plt.plot(TestMarket.pNow_hist)
"Proportion of punks in the population."
plt.show()
pPunks = np.array(TestMarket.pNow_hist[1:])  # define the punk-fraction history before smoothing it
pPunks_smooth = pd.rolling_mean(pPunks,25)
plt.plot(pPunks_smooth, label="Punks")
plt.ylim([0.46,0.54])
plt.axhline(y=.5, linewidth=0.8)
#plt.title("Proportion of punks in the population")
plt.ylabel("Fraction")
#plt.figtext(x=0.01, y=0.005, s="*Smoothed with 25-period moving average.")
plt.xlabel("Time")
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#plt.tight_layout()
plt.savefig("./fraction_of_punks.png")
plt.savefig("../../dissertation/HACK_docs_pandoc/fraction_of_punks.png")
plt.show()
'''
pPunks = np.array(TestMarket.pNow_hist[1:])
pJocks = 1.0 - pPunks
plt.plot(pJocks, label="Jocks")
plt.plot(pPunks, label="Punks")
plt.show()
pPunks = np.array(TestMarket.pNow_hist[1:])
pJocks = 1.0 - pPunks
plt.plot(pJocks, label="Jocks")
plt.ylim([0.42,0.58])
plt.axhline(y=.5, linewidth=0.8)
#plt.axhline(y=0.002, xmin=0, xmax=1, hold=None)
plt.show()
pPunks_smooth_a = pd.rolling_mean(pPunks,20)
plt.plot(pPunks_smooth, label="Punks")
plt.ylim([0.4,0.6])
plt.axhline(y=.5, linewidth=0.8)
plt.show()
'''
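# --- Illustrative sketch (not part of the original script) ---
# pd.rolling_mean was removed from later pandas releases; the same smoothing
# of the punk-fraction history can be expressed with the Series.rolling API.
# The helper below is a hedged stand-in, not code used above.
def _rolling_mean_sketch(values, window=25):
    """Rolling-mean smoothing equivalent to the deprecated pd.rolling_mean(values, window)."""
    return pd.Series(values).rolling(window=window).mean().values
# e.g. pPunks_smooth = _rolling_mean_sketch(np.array(TestMarket.pNow_hist[1:]), 25)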
|
apache-2.0
|
ahoyosid/scikit-learn
|
sklearn/utils/extmath.py
|
142
|
21102
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
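# Illustrative usage sketch (not part of the original module): row_norms
# matches the naive dense expression quoted in its docstring.
def _row_norms_example():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    dense = np.sqrt((X * X).sum(axis=1))
    assert np.allclose(row_norms(X), dense)
    assert np.allclose(row_norms(X, squared=True), dense ** 2)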
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to np.log(np.linalg.det(A)) but more robust.
It returns -Inf if det(A) is non-positive or not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
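# Illustrative usage sketch (not part of the original module): on a symmetric
# positive definite matrix fast_logdet agrees with the naive log-determinant.
def _fast_logdet_example():
    rng = np.random.RandomState(0)
    B = rng.rand(4, 4)
    A = np.dot(B, B.T) + 4 * np.eye(4)   # symmetric positive definite
    assert np.allclose(fast_logdet(A), np.log(np.linalg.det(A)))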
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while ensuring Fortran contiguity.
This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
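# Illustrative usage sketch (not part of the original module): with two sparse
# operands the product stays sparse unless dense_output=True is requested.
def _safe_sparse_dot_example():
    from scipy.sparse import csr_matrix
    a = csr_matrix(np.eye(3))
    b = csr_matrix(np.arange(9.0).reshape(3, 3))
    sparse_product = safe_sparse_dot(a, b)                    # still a sparse matrix
    dense_product = safe_sparse_dot(a, b, dense_output=True)  # converted via toarray()
    assert issparse(sparse_product)
    assert np.allclose(dense_product, b.toarray())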
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
An (A.shape[0] x size) matrix with orthonormal columns, whose range
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
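# Illustrative usage sketch (not part of the original module): for a low-rank
# input the returned orthonormal basis captures essentially the whole range,
# so projecting onto it reconstructs A.
def _randomized_range_finder_example():
    rng = np.random.RandomState(0)
    A = np.dot(rng.randn(50, 5), rng.randn(5, 30))   # rank-5 matrix
    Q = randomized_range_finder(A, size=10, n_iter=3, random_state=0)
    assert Q.shape == (50, 10)
    assert np.allclose(np.dot(Q, np.dot(Q.T, A)), A)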
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[0] > M.shape[1], since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
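# Minimal usage sketch for randomized_svd (illustrative; exact values depend
# on the random_state): extract the top singular triplets of a tall matrix and
# check the returned shapes and ordering.
# >>> rng = np.random.RandomState(42)
# >>> M = rng.randn(100, 30)
# >>> U, s, V = randomized_svd(M, n_components=5, n_iter=4, random_state=42)
# >>> U.shape, s.shape, V.shape
# ((100, 5), (5,), (5, 30))
# >>> np.all(s[:-1] >= s[1:])  # singular values are returned in decreasing order
# True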
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the least numerical error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Eigenvalues smaller (in absolute value) than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
The pseudo-inverse of the matrix a.
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
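# Small sketch of svd_flip (illustrative; assumes the module-level numpy and
# scipy.linalg imports): the flipped factors reconstruct the same matrix, and
# afterwards the entry of largest magnitude in each column of u is non-negative.
# >>> rng = np.random.RandomState(0)
# >>> A = rng.randn(6, 4)
# >>> u, s, v = linalg.svd(A, full_matrices=False)
# >>> u2, v2 = svd_flip(u.copy(), v.copy())
# >>> np.allclose(np.dot(u2 * s, v2), A)
# True
# >>> np.all(u2[np.argmax(np.abs(u2), axis=0), np.arange(u2.shape[1])] >= 0)
# True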
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
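# Pure-numpy sketch of the stable piecewise formula documented above (the
# compiled helper used by log_logistic implements the same rule); shown only
# as an illustration of the numerics, for inputs where the naive form is safe.
# >>> def _log_logistic_reference(x):
# ...     x = np.asarray(x, dtype=float)
# ...     out = np.empty_like(x)
# ...     pos = x > 0
# ...     out[pos] = -np.log1p(np.exp(-x[pos]))
# ...     out[~pos] = x[~pos] - np.log1p(np.exp(x[~pos]))
# ...     return out
# >>> x = np.array([-3.0, 0.0, 3.0])
# >>> np.allclose(_log_logistic_reference(x), np.log(1.0 / (1.0 + np.exp(-x))))
# True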
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
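# Sanity-check sketch for the incremental update above (illustrative only):
# the combined statistics should match the mean and population variance
# computed directly on the concatenated data.
# >>> rng = np.random.RandomState(0)
# >>> X1, X2 = rng.randn(40, 3), rng.randn(60, 3)
# >>> mean, var, n = _batch_mean_variance_update(X2, X1.mean(axis=0),
# ...                                            X1.var(axis=0), X1.shape[0])
# >>> X_all = np.vstack((X1, X2))
# >>> n == X_all.shape[0]
# True
# >>> np.allclose(mean, X_all.mean(axis=0)) and np.allclose(var, X_all.var(axis=0))
# True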
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
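# Tiny illustration of the flip above: after the call, the entry of largest
# magnitude in each row is positive.
# >>> u = np.array([[1., -3.], [2., 1.]])
# >>> u_flipped = _deterministic_vector_sign_flip(u.copy())
# >>> np.allclose(u_flipped, [[-1., 3.], [2., 1.]])
# True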
|
bsd-3-clause
|
Sklearn-HMM/scikit-learn-HMM
|
sklean-hmm/tests/test_multiclass.py
|
3
|
13721
|
import numpy as np
import warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent
X = np.ones((10, 2))
X[:5, :] = 0
y = [[int(i >= 5), 2, 3] for i in range(10)]
with warnings.catch_warnings(record=True):
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
#y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.65, 0.74), (0.72, 0.84)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = [tuple(l.nonzero()[0]) for l in (Y_proba > 0.5)]
assert_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).nonzero()[1],
np.hstack(clf.predict(X_test)))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 1)
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], (1 + i) % 3)
def test_ovo_string_y():
"Test that the OvO doesn't screw the encoding of string labels"
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
svc = LinearSVC()
ovo = OneVsOneClassifier(svc)
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
|
bsd-3-clause
|
liufuyang/deep_learning_tutorial
|
Deep-Learning-A-Z/Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Section 4 - Building an ANN/classification_template.py
|
37
|
2538
|
# Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
# Create your classifier here
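# One possible choice (commented out; the template intentionally leaves this
# step to you), for example a simple logistic regression:
# from sklearn.linear_model import LogisticRegression
# classifier = LogisticRegression(random_state = 0)
# classifier.fit(X_train, y_train)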
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
mit
|
Joel-U/sparkle
|
setup.py
|
2
|
1397
|
import sys
import os
import glob
from setuptools import setup, find_packages
setup(name="sparkle",
version='0.2.0',
description="Sound Presentation And Recording Kit for Laboratories of Electrophysiology",
url="https://github.com/portfors-lab/sparkle",
author='Amy Boyle',
author_email="[email protected]",
license="GPLv3",
packages=find_packages(exclude=['test', 'doc']),
install_requires=[
'numpy',
'matplotlib',
'scipy',
'PyYAML',
'h5py',
'pyqtgraph',
'Sphinx',
'sphinx-rtd-theme',
'pydaqmx',
],
package_data={'':['*.conf', '*.jpg', '*.png', "*.ico"]},
entry_points={'console_scripts':['sparkle=sparkle.gui.run:main']},
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
]
)
|
gpl-3.0
|
mikofski/pvlib-python
|
pvlib/tests/test_location.py
|
1
|
12499
|
import datetime
from unittest.mock import ANY
import numpy as np
from numpy import nan
import pandas as pd
from conftest import assert_frame_equal, assert_index_equal
import pytest
import pytz
from pytz.exceptions import UnknownTimeZoneError
import pvlib
from pvlib.location import Location
from pvlib.solarposition import declination_spencer71
from pvlib.solarposition import equation_of_time_spencer71
from test_solarposition import expected_solpos, golden, golden_mst
from conftest import requires_ephem, requires_tables, fail_on_pvlib_version
def test_location_required():
Location(32.2, -111)
def test_location_all():
Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
@pytest.mark.parametrize('tz', [
pytz.timezone('US/Arizona'), 'America/Phoenix', -7, -7.0,
datetime.timezone.utc
])
def test_location_tz(tz):
Location(32.2, -111, tz)
def test_location_invalid_tz():
with pytest.raises(UnknownTimeZoneError):
Location(32.2, -111, 'invalid')
def test_location_invalid_tz_type():
with pytest.raises(TypeError):
Location(32.2, -111, [5])
def test_location_print_all():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
expected_str = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__str__() == expected_str
def test_location_print_pytz():
tus = Location(32.2, -111, pytz.timezone('US/Arizona'), 700, 'Tucson')
expected_str = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__str__() == expected_str
@pytest.fixture
def times():
return pd.date_range(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
@requires_tables
def test_get_clearsky(mocker, times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
m = mocker.spy(pvlib.clearsky, 'ineichen')
out = tus.get_clearsky(times)
assert m.call_count == 1
assert_index_equal(out.index, times)
# check that values are 0 before sunrise and after sunset
assert out.iloc[0, :].sum().sum() == 0
assert out.iloc[-1:, :].sum().sum() == 0
# check that values are > 0 during the day
assert (out.iloc[1:-1, :] > 0).all().all()
assert (out.columns.values == ['ghi', 'dni', 'dhi']).all()
def test_get_clearsky_ineichen_supply_linke(mocker):
tus = Location(32.2, -111, 'US/Arizona', 700)
times = pd.date_range(start='2014-06-24-0700', end='2014-06-25-0700',
freq='3h')
mocker.spy(pvlib.clearsky, 'ineichen')
out = tus.get_clearsky(times, linke_turbidity=3)
# we only care that the LT is passed in this test
pvlib.clearsky.ineichen.assert_called_once_with(ANY, ANY, 3, ANY, ANY)
assert_index_equal(out.index, times)
# check that values are 0 before sunrise and after sunset
assert out.iloc[0:2, :].sum().sum() == 0
assert out.iloc[-2:, :].sum().sum() == 0
# check that values are > 0 during the day
assert (out.iloc[2:-2, :] > 0).all().all()
assert (out.columns.values == ['ghi', 'dni', 'dhi']).all()
def test_get_clearsky_haurwitz(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
clearsky = tus.get_clearsky(times, model='haurwitz')
expected = pd.DataFrame(data=np.array(
[[ 0. ],
[ 242.30085588],
[ 559.38247117],
[ 384.6873791 ],
[ 0. ]]),
columns=['ghi'],
index=times)
assert_frame_equal(expected, clearsky)
def test_get_clearsky_simplified_solis(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
clearsky = tus.get_clearsky(times, model='simplified_solis')
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 70.00146271, 638.01145669, 236.71136245],
[ 101.69729217, 852.51950946, 577.1117803 ],
[ 86.1679965 , 755.98048017, 385.59586091],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_apparent_elevation(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
solar_position = {'apparent_elevation': pd.Series(80, index=times),
'apparent_zenith': pd.Series(10, index=times)}
clearsky = tus.get_clearsky(times, model='simplified_solis',
solar_position=solar_position)
expected = pd.DataFrame(data=np.
array([[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_dni_extra(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
clearsky = tus.get_clearsky(times, model='simplified_solis',
dni_extra=1370)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 67.82281485, 618.15469596, 229.34422063],
[ 98.53217848, 825.98663808, 559.15039353],
[ 83.48619937, 732.45218243, 373.59500313],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky)
def test_get_clearsky_simplified_solis_pressure(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
clearsky = tus.get_clearsky(times, model='simplified_solis',
pressure=95000)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 70.20556637, 635.53091983, 236.17716435],
[ 102.08954904, 850.49502085, 576.28465815],
[ 86.46561686, 753.70744638, 384.90537859],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_aod_pw(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
clearsky = tus.get_clearsky(times, model='simplified_solis',
aod700=0.25, precipitable_water=2.)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 85.77821205, 374.58084365, 179.48483117],
[ 143.52743364, 625.91745295, 490.06254157],
[ 114.63275842, 506.52275195, 312.24711495],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_valueerror(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
with pytest.raises(ValueError):
tus.get_clearsky(times, model='invalid_model')
def test_from_tmy_3():
from test_tmy import TMY3_TESTFILE
from pvlib.iotools import read_tmy3
data, meta = read_tmy3(TMY3_TESTFILE)
loc = Location.from_tmy(meta, data)
assert loc.name is not None
assert loc.altitude != 0
assert loc.tz != 'UTC'
assert_frame_equal(loc.weather, data)
def test_from_tmy_2():
from test_tmy import TMY2_TESTFILE
from pvlib.iotools import read_tmy2
data, meta = read_tmy2(TMY2_TESTFILE)
loc = Location.from_tmy(meta, data)
assert loc.name is not None
assert loc.altitude != 0
assert loc.tz != 'UTC'
assert_frame_equal(loc.weather, data)
def test_from_epw():
from test_epw import epw_testfile
from pvlib.iotools import read_epw
data, meta = read_epw(epw_testfile)
loc = Location.from_epw(meta, data)
assert loc.name is not None
assert loc.altitude != 0
assert loc.tz != 'UTC'
assert_frame_equal(loc.weather, data)
def test_get_solarposition(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = golden_mst.get_solarposition(times, temperature=11)
ephem_data = np.round(ephem_data, 3)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 3)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_get_airmass(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
airmass = tus.get_airmass(times)
expected = pd.DataFrame(data=np.array(
[[ nan, nan],
[ 3.61046506, 3.32072602],
[ 1.76470864, 1.62309115],
[ 2.45582153, 2.25874238],
[ nan, nan]]),
columns=['airmass_relative', 'airmass_absolute'],
index=times)
assert_frame_equal(expected, airmass)
airmass = tus.get_airmass(times, model='young1994')
expected = pd.DataFrame(data=np.array(
[[ nan, nan],
[ 3.6075018 , 3.31800056],
[ 1.7641033 , 1.62253439],
[ 2.45413091, 2.25718744],
[ nan, nan]]),
columns=['airmass_relative', 'airmass_absolute'],
index=times)
assert_frame_equal(expected, airmass)
def test_get_airmass_valueerror(times):
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
with pytest.raises(ValueError):
tus.get_airmass(times, model='invalid_model')
def test_Location___repr__():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
expected = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__repr__() == expected
@requires_ephem
def test_get_sun_rise_set_transit(golden):
times = pd.DatetimeIndex(['2015-01-01 07:00:00', '2015-01-01 23:00:00'],
tz='MST')
result = golden.get_sun_rise_set_transit(times, method='pyephem')
assert all(result.columns == ['sunrise', 'sunset', 'transit'])
result = golden.get_sun_rise_set_transit(times, method='spa')
assert all(result.columns == ['sunrise', 'sunset', 'transit'])
dayofyear = 1
declination = declination_spencer71(dayofyear)
eot = equation_of_time_spencer71(dayofyear)
result = golden.get_sun_rise_set_transit(times, method='geometric',
declination=declination,
equation_of_time=eot)
assert all(result.columns == ['sunrise', 'sunset', 'transit'])
def test_get_sun_rise_set_transit_valueerror(golden):
times = pd.DatetimeIndex(['2015-01-01 07:00:00', '2015-01-01 23:00:00'],
tz='MST')
with pytest.raises(ValueError):
golden.get_sun_rise_set_transit(times, method='eyeball')
def test_extra_kwargs():
with pytest.raises(TypeError, match='arbitrary_kwarg'):
Location(32.2, -111, arbitrary_kwarg='value')
|
bsd-3-clause
|
searchs/bigdatabox
|
nasa_weblogs.py
|
1
|
5867
|
#!/usr/bin/env python
import sys
import os
import re
import pandas as pd
# import modin.pandas as pd  # replacing basic pandas with the faster modin
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
from pyspark.sql.context import SQLContext
from pyspark.sql.functions import udf
import glob
sc = SparkContext()
sqlContext = SQLContext(sc)
spark = SparkSession(sc)
raw_data = glob.glob("data/*.log")
df = spark.read.text(raw_data)
df.printSchema()
df.show(5, truncate=False)
sample_logs = [item['value'] for item in df.take(15)]
# EXTRACT HOSTS
host_pattern = r'(^\S+\.[\S+\.]+\S+)\s'
hosts = [re.search(host_pattern, item).group(1)
if re.search(host_pattern, item)
else 'no match'
for item in sample_logs]
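# Example (hypothetical log line): for a record beginning with
# 'in24.inetnebr.com - - [01/Aug/1995:00:00:01 -0400] ...', host_pattern
# captures 'in24.inetnebr.com'.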
# EXTRACT TIMESTAMPS
ts_pattern = r'\[(\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2} -\d{4})]'
timestamps = [re.search(ts_pattern, item).group(1) for item in sample_logs]
# EXTRACT HTTP METHODS/PROTOCOLS
method_uri_protocol_pattern = r'\"(\S+)\s(\S+)\s*(\S*)\"'
method_uri_protocol = [re.search(method_uri_protocol_pattern, item).groups()
if re.search(method_uri_protocol_pattern, item)
else 'no match'
for item in sample_logs]
# EXTRACT STATUS CODES
status_pattern = r'\s(\d{3})\s'
status = [re.search(status_pattern, item).group(1) for item in sample_logs]
# EXTRACT HTTP RESPONSE CONTENT SIZE
content_size_pattern = r'\s(\d+)$'
content_size = [re.search(content_size_pattern, item).group(1) for item in sample_logs]
# COMBINED ALGO
from pyspark.sql.functions import regexp_extract
logs_df = df.select(regexp_extract('value', host_pattern, 1).alias('host'),
regexp_extract('value', ts_pattern, 1).alias('timestamp'),
regexp_extract('value', method_uri_protocol_pattern, 1).alias('method'),
regexp_extract('value', method_uri_protocol_pattern, 2).alias('endpoint'),
regexp_extract('value', method_uri_protocol_pattern, 3).alias('protocol'),
regexp_extract('value', status_pattern, 1).cast('integer').alias('status'),
regexp_extract('value', content_size_pattern, 1).cast('integer').alias('content_size'))
logs_df.show(10, truncate=True)
print((logs_df.count(), len(logs_df.columns)))
# CHECK NULL COLUMN COUNT
(df.filter(df['value'].isNull()).count())
bad_rows_df = logs_df.filter(logs_df['host'].isNull()|
logs_df['timestamp'].isNull() |
logs_df['method'].isNull() |
logs_df['endpoint'].isNull() |
logs_df['status'].isNull() |
logs_df['content_size'].isNull()|
logs_df['protocol'].isNull())
bad_rows_df.count()
# GET COLUMNS WITH NULLS
from pyspark.sql.functions import col
from pyspark.sql.functions import sum as spark_sum
def count_null(col_name):
return spark_sum(col(col_name).isNull().cast('integer')).alias(col_name)
# Build up a list of column expressions, one per column.
exprs = [count_null(col_name) for col_name in logs_df.columns]
# Run the aggregation. The *exprs converts the list of expressions into
# variable function arguments.
logs_df.agg(*exprs).show()
# HANDLE NULLS IN COLUMN - HTTP STATUS
# Reference column expression for the HTTP status (built here for illustration;
# it is not bound to a name):
regexp_extract('value', r'\s(\d{3})\s', 1).cast('integer').alias('status')
null_status_df = df.filter(~df['value'].rlike(r'\s(\d{3})\s'))
null_status_df.count()
null_status_df.show(truncate=False)
bad_status_df = null_status_df.select(regexp_extract('value', host_pattern, 1).alias('host'),
regexp_extract('value', ts_pattern, 1).alias('timestamp'),
regexp_extract('value', method_uri_protocol_pattern, 1).alias('method'),
regexp_extract('value', method_uri_protocol_pattern, 2).alias('endpoint'),
regexp_extract('value', method_uri_protocol_pattern, 3).alias('protocol'),
regexp_extract('value', status_pattern, 1).cast('integer').alias('status'),
regexp_extract('value', content_size_pattern, 1).cast('integer').alias('content_size'))
bad_status_df.show(truncate=False)
logs_df = logs_df[logs_df['status'].isNotNull()]
exprs = [count_null(col_name) for col_name in logs_df.columns]
logs_df.agg(*exprs).show()
# Reference column expression for the content size (again not bound to a name):
regexp_extract('value', r'\s(\d+)$', 1).cast('integer').alias('content_size')
null_content_size_df = df.filter(~df['value'].rlike(r'\s\d+$'))
null_content_size_df.count()
null_content_size_df.take(10)
logs_df = logs_df.na.fill({'content_size': 0})
exprs = [count_null(col_name) for col_name in logs_df.columns]
logs_df.agg(*exprs).show()
month_map = {
'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12
}
def parse_clf_time(text):
""" Convert Common Log time format into a Python datetime object
Args:
text (str): date and time in Apache time format [dd/mmm/yyyy:hh:mm:ss (+/-)zzzz]
Returns:
a string suitable for passing to CAST('timestamp')
"""
# NOTE: We're ignoring the time zone offset here; it might need to be handled depending on the problem you are solving
return "{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}".format(
int(text[7:11]),
month_map[text[3:6]],
int(text[0:2]),
int(text[12:14]),
int(text[15:17]),
int(text[18:20])
)
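# Worked example of the conversion above (hypothetical log timestamp):
# parse_clf_time('01/Jul/1995:00:00:01 -0400') -> '1995-07-01 00:00:01'
# (the '-0400' offset is dropped, as noted in the function).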
udf_parse_time = udf(parse_clf_time)
logs_df = (logs_df\
.select('*', udf_parse_time(logs_df['timestamp'])\
.cast('timestamp')\
.alias('time'))\
.drop('timestamp'))
logs_df.show(10, truncate=True)
logs_df.printSchema()
logs_df.cache()
|
mit
|
noinil/pinang
|
utils/pwm_seq_energy.py
|
1
|
3268
|
#!/usr/bin/env python3
from Bio import SeqIO
import numpy as np
import matplotlib.pyplot as plt
def read_sequence(fasta_file_name):
"""Read DNA sequence from .fasta file.
Keyword Arguments:
fasta_file_name -- sequence file in FASTA format
"""
fasta_sequences = SeqIO.parse(open(fasta_file_name),'fasta')
dna_fasta = list(fasta_sequences)[0]
return dna_fasta
def read_pwm(pwm_file_name):
"""Read position weight matrix from pwm_file.
Keyword Arguments:
pwm_file_name -- path to the position weight matrix file
"""
pwm = {}
with open(pwm_file_name, 'r') as pwm_fin:
for line in pwm_fin:
words = line.split()
base_type = line[0]
enescore = []
for v in words[1:]:
enescore.append(float(v))
pwm[base_type] = enescore[:]
return pwm.copy()
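# Assumed PWM file layout for read_pwm (hypothetical values), one
# whitespace-separated row per base; pwm['A'][i] is then the score of base A
# at position i:
# A 0.12 -0.80 0.33
# C -0.45 0.21 -0.10
# G 0.07 -0.33 0.52
# T -0.60 0.95 -0.75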
def main(fasta_file_name, pwm_file_name):
# ------------------------------ read fasta ------------------------------
dna_fasta = read_sequence(fasta_file_name)
dna_name, dna_sequence = dna_fasta.id, dna_fasta.seq
reverse_dna_sequence = dna_sequence.reverse_complement()
# print(dna_name, dna_sequence)
# print(dna_sequence.reverse_complement())
dna_len = len(dna_sequence)
# ------------------------------ read pwm ------------------------------
pwm = read_pwm(pwm_file_name)
pwm_len = len(pwm['A'])
# ------------------------------ pwm score calculation ------------------------------
def calculate_pwm_energy(seq_piece, pwm0):
score = 0
for i, b in enumerate(seq_piece):
s = pwm0[b][i]
score += s * 0.593
return score
# ------------------------------ simple test ------------------------------
shifting_score = []
shifting_score_reverse = []
if dna_len < pwm_len:
print("ERROR: DNA length < PWM length!!! STOP here.")
return 1
piece_start, piece_end = 0, pwm_len
while piece_end <= dna_len:
dna_piece = dna_sequence[piece_start : piece_end]
reverse_dna_piece = dna_piece.reverse_complement()
piece_score = calculate_pwm_energy(dna_piece, pwm)
reverse_piece_score = calculate_pwm_energy(reverse_dna_piece, pwm)
# print(piece_start, dna_piece, reverse_dna_piece, piece_score, reverse_piece_score)
shifting_score.append(piece_score)
shifting_score_reverse.append(reverse_piece_score)
piece_start += 1
piece_end += 1
# ------------------------------ plotting ------------------------------
fig, axes = plt.subplots(1, 1, figsize=(12, 8))
X = [i for i in range(dna_len - pwm_len + 1)]
axes.plot(X, shifting_score, 'r')
axes.plot(X, shifting_score_reverse, 'g')
axes.set_xlim(0, dna_len - pwm_len)
# axes.set_ylim(-12, 10)
# axes.set_xticks()
# axes.set_yticks()
# axes.text(x=0, y=0, s=str(dna_sequence), family='monospace', fontsize=4.6)
# axes.text(x=0, y=0.5, s=str(reverse_dna_sequence), family='monospace', fontsize=4.6)
plt.show()
if __name__ == '__main__':
import sys
def print_usage():
    print("Usage: " + sys.argv[0] + " seq.fasta tf.pwm")
if len(sys.argv) < 3:
    print_usage()
    sys.exit(1)
fasta_file_name = sys.argv[1]
pwm_file_name = sys.argv[2]
main(fasta_file_name, pwm_file_name)
|
gpl-2.0
|
acmaheri/sms-tools
|
lectures/6-Harmonic-model/plots-code/spectral-peaks-and-f0.py
|
2
|
1040
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N//2
hM = (M+1)//2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, hN, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(N/2)/float(N)
plt.figure(1, figsize=(9, 5))
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
harms = np.arange(1,20)*440.0
plt.vlines(harms, -80, max(mX)+1, color='g', lw=1.5)
plt.title('mX + peaks + f0 multiples (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('spectral-peaks-and-f0.png')
plt.show()
|
agpl-3.0
|
nhejazi/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
7
|
64758
|
"""
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn.tree.tree import CRITERIA_CLF
from sklearn.tree.tree import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: unequal number of nodes ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": unequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": unequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": unequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": unequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": unequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": unequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": unequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=5000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=3.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=2.5).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_impurity_decrease=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets,
sparse=False):
"""Test the interaction between min_weight_fraction_leaf and min_samples_leaf
when sample_weights is not provided in fit."""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight *
est.min_weight_fraction_leaf), 5),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=.1,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf)),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
def test_min_weight_fraction_leaf_with_min_samples_leaf():
# Check on dense input
for name in ALL_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "iris")
# Check on sparse input
for name in SPARSE_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "multilabel", True)
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # verify that leaf nodes are pure (impurity == 0) when
        # min_impurity_split is left unset
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_true(est.min_impurity_split is None,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
try:
assert_warns(DeprecationWarning, est.fit, X, y)
except AssertionError:
pass
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using
# min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
assert_warns_message(DeprecationWarning,
"Use the min_impurity_decrease",
est.fit, X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_min_impurity_decrease():
    # test that min_impurity_decrease ensures a split is made only if
    # the impurity decrease is at least that value
X, y = datasets.make_classification(n_samples=10000, random_state=42)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# Check default value of min_impurity_decrease, 1e-7
est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0)
# Check with explicit value of 0.05
est2 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.05, random_state=0)
# Check with a much lower value of 0.0001
est3 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.0001, random_state=0)
        # Check with a much higher value of 0.1
est4 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.1, random_state=0)
for est, expected_decrease in ((est1, 1e-7), (est2, 0.05),
(est3, 0.0001), (est4, 0.1)):
assert_less_equal(est.min_impurity_decrease, expected_decrease,
"Failed, min_impurity_decrease = {0} > {1}"
.format(est.min_impurity_decrease,
expected_decrease))
est.fit(X, y)
for node in range(est.tree_.node_count):
                # If the current node is not a leaf, check that the split was
                # justified w.r.t. the min_impurity_decrease
if est.tree_.children_left[node] != TREE_LEAF:
imp_parent = est.tree_.impurity[node]
wtd_n_node = est.tree_.weighted_n_node_samples[node]
left = est.tree_.children_left[node]
wtd_n_left = est.tree_.weighted_n_node_samples[left]
imp_left = est.tree_.impurity[left]
wtd_imp_left = wtd_n_left * imp_left
right = est.tree_.children_right[node]
wtd_n_right = est.tree_.weighted_n_node_samples[right]
imp_right = est.tree_.impurity[right]
wtd_imp_right = wtd_n_right * imp_right
wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left
wtd_avg_left_right_imp /= wtd_n_node
fractional_node_weight = (
est.tree_.weighted_n_node_samples[node] / X.shape[0])
actual_decrease = fractional_node_weight * (
imp_parent - wtd_avg_left_right_imp)
assert_greater_equal(actual_decrease, expected_decrease,
"Failed with {0} "
"expected min_impurity_decrease={1}"
.format(actual_decrease,
expected_decrease))
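# For reference, the quantity verified above is the weighted impurity decrease
#
#     N_t / N * (impurity - N_t_R / N_t * right_impurity
#                         - N_t_L / N_t * left_impurity)
#
# which is exactly what the fractional_node_weight computation in
# test_min_impurity_decrease evaluates (a sketch of the accepted-split
# criterion, mirroring the code above).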
def test_pickle():
    # Check that fitting, pickling and unpickling preserve the score and the
    # fitted tree attributes.
    for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
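    # ("balanced" assigns each sample the weight
    #  n_samples / (n_classes * np.bincount(y))[class], so the truncated iris
    #  set is re-weighted as if its classes were equally represented.)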
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_behaviour_constant_feature_after_splits():
X = np.transpose(np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]],
np.zeros((4, 11)))))
y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3]
for name, TreeEstimator in ALL_TREES.items():
# do not check extra random trees
if "ExtraTree" not in name:
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 2)
assert_equal(est.tree_.node_count, 5)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample the larger datasets to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree_type, dataset in product(SPARSE_TREES, ("clf_small", "toy",
"digits", "multilabel",
"sparse-pos",
"sparse-neg",
"sparse-mix", "zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree_type, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree_type, dataset in product(SPARSE_TREES, ["boston", "reg_small"]):
if tree_type in REG_TREES:
yield (check_sparse_input, tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_parameters, tree_type, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_criterion, tree_type, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a CSR and a CSC matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree_type in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree_type)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
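    # (A max_depth=1 tree has three nodes -- the root and two leaves -- so each
    # row of the decision-path indicator marks the root plus exactly one leaf.)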
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
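    # Worked check of the unweighted fit above (sketch): the root holds
    # y = [6, 7, 3, 4, 3] with median 4, so its MAE is (2+3+1+0+1)/5 = 1.4;
    # the split separates the two x == 3 samples (y = [6, 3], median 4.5,
    # MAE 1.5) from the rest (y = [7, 4, 3], median 4, MAE 4/3), matching the
    # impurity and value arrays asserted above.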
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
[0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
def test_criterion_copy():
    # Check that a copy of our criterion has the same type and properties
    # as the original
n_outputs = 3
n_classes = np.arange(3, dtype=np.intp)
n_samples = 100
def _pickle_copy(obj):
return pickle.loads(pickle.dumps(obj))
for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]:
for _, typename in CRITERIA_CLF.items():
criteria = typename(n_outputs, n_classes)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_classes_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_array_equal(n_classes, n_classes_)
for _, typename in CRITERIA_REG.items():
criteria = typename(n_outputs, n_samples)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_samples_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_equal(n_samples, n_samples_)
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e483.py
|
2
|
5426
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer, PolygonOutputLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer,
DimshuffleLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 4
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", "2013-04-18"),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
independently_center_inputs=True,
# ignore_incomplete=True
# offset_probability=0.5,
# ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-7,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 5000: 1e-5
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
global source
    # build the data source for this experiment (required: `source` is used
    # in net_dict_copy below)
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        target_appliance=target_appliance,
        logger=logging.getLogger(name),
        seq_length=seq_length
    ))
    source = RandomSegmentsInMemory(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DenseLayer,
'num_units': seq_length,
'nonlinearity': rectify
},
{
'type': PolygonOutputLayer,
'num_units': 1,
'seq_length': seq_length
}
]
net = Net(**net_dict_copy)
return net
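# Hedged usage note (not part of the original script): assuming `source` has
# been constructed (e.g. via the RandomSegmentsInMemory block in exp_a), the
# network can also be built and trained directly, bypassing the eval()
# machinery in main() below:
#
#     net = exp_a(NAME + 'a', ['fridge freezer', 'fridge', 'freezer'], 512)
#     run_experiment(net, epochs=None)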
def main():
APPLIANCES = [
('a', ['fridge freezer', 'fridge', 'freezer'], 512),
('b', "'coffee maker'", 512),
('c', "'dish washer'", 2000),
('d', "'hair dryer'", 256),
('e', "'kettle'", 256),
('f', "'oven'", 2000),
('g', "'toaster'", 256),
('h', "'light'", 2000),
('i', ['washer dryer', 'washing machine'], 1500)
]
for experiment, appliance, seq_length in APPLIANCES[:1]:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
import ipdb; ipdb.set_trace()
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e483.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
mit
|
saullocastro/pyNastran
|
pyNastran/applications/rainflow.py
|
1
|
7115
|
from __future__ import print_function
# this needs some serious TLC
from six.moves import range
from six import iteritems
from numpy import where, array, vstack, savetxt, loadtxt
def rainflow(icase, stress_in):
"""
Does rainflow counting based on stress (not nominal stress).
Works with a non-minimum first value.
"""
stress = reorganize_load(icase, stress_in)
#print('stress[%i] = %s' % (icase, stress))
stress, cycles = ASTM_E1049_rainflow(stress)
max_stress = []
min_stress = []
for i in range(len(cycles)):
max_stress.append(max(cycles[i][0], cycles[i][1]))
min_stress.append(min(cycles[i][0], cycles[i][1]))
return (max_stress, min_stress)
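# Hedged usage sketch (not part of the original module; the load values are
# invented for illustration): shows the expected call pattern for rainflow().
def _rainflow_usage_example():
    peaks = [0., 5., -3., 8., -1., 6., 0.]  # turning points of a made-up history
    max_stress, min_stress = rainflow(icase=0, stress_in=peaks)
    for hi, lo in zip(max_stress, min_stress):
        print('counted cycle: max=%s min=%s range=%s' % (hi, lo, hi - lo))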
def reorganize_load(icase, stress_in):
"""
Reorganize Order History - ASTM E1049 5.4.5.2 (1)
We want to start at the minimum value and then loop around
until we get back to where we started
"""
data = array(stress_in)
imin = where(data == data.min())[0]
imin0 = imin[0]
# .. todo:: the double type conversion (list & array) is less than ideal
x = list(stress_in)
if imin0 != 0:
x = x[imin0:] + x[:imin0]
# remove repeats
i = 0
y = [x[0]]
while i < len(x) - 1:
if x[i+1] != x[i]:
y.append(x[i+1])
i += 1
# handles single-cycle impulse loading
if y[0] != y[-1]:
print(' y[0]=%s y[-1]=%s; adding y[0]' % (y[0], y[-1]))
y.append(y[0])
return y
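# Hedged worked example (input values invented for illustration): the history
# is rotated to start at its global minimum, consecutive repeats are dropped,
# and the starting value is appended so the loop closes on itself.
def _reorganize_load_example():
    assert reorganize_load(0, [3, 1, 4, 1, 5]) == [1, 4, 1, 5, 3, 1]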
def ASTM_E1049_rainflow(stress_in):
"""
Does rainflow counting based on stress (not nominal stress).
Works with a non-minimum first value.
From ASTM E1049 5.4.5.2 (1)
The ASTM spec is very Fortran heavy, so rather than Python-ifying it,
we just copy the spec.
.. note:: Assumes the minimum value is stress_in[0] and
the final value is a repeat of stress_in[0].
"""
x = 0.0
y = 0.0
stress = {}
cycles = {}
i = 0
icyc = 0
while stress_in:
temp_stress = stress_in.pop(0)
stress[i] = temp_stress
if i > 1:
if((stress[i] - stress[i-1] >= 0 and stress[i-1] - stress[i-2] >= 0) or
(stress[i] - stress[i-1] <= 0 and stress[i-1] - stress[i-2] < 0)):
del stress[i]
i -= 1
stress[i] = temp_stress
i += 1
i = -1
istop = 0
goto = 2
while istop == 0:
if goto == 2:
i += 1
if i >= len(stress):
istop = 1 # rainflow counting finished
goto = 3
elif goto == 3:
if i < 2:
goto = 2
else:
y = abs(stress[i-1] - stress[i-2])
x = abs(stress[i] - stress[i-1])
goto = 4
elif goto == 4:
if x < y:
goto = 2
else:
goto = 5
elif goto == 5:
cycles[icyc] = [min(stress[i-1], stress[i-2]),
max(stress[i-1], stress[i-2]), y]
icyc += 1
jend = len(stress) - 1
for j in range(i, jend+1):
stress[j-2] = stress[j]
del stress[jend]
del stress[jend - 1]
i -= 2
goto = 3
return (stress, cycles)
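# Hedged sketch (illustrative input): ASTM_E1049_rainflow expects a history
# that starts at its minimum and ends on a repeat of that value (as produced
# by reorganize_load) and returns the residual point dict together with
# `cycles`, a dict mapping cycle index to [min, max, range].
def _astm_rainflow_example():
    history = reorganize_load(0, [3, 1, 4, 1, 5])  # -> [1, 4, 1, 5, 3, 1]
    residual, cycles = ASTM_E1049_rainflow(history)
    for icyc in sorted(cycles):
        lo, hi, rng = cycles[icyc]
        print('cycle %d: min=%s max=%s range=%s' % (icyc, lo, hi, rng))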
def rainflow_from_csv(input_csv, casenames, features,
write_csvs=True, delimiter=',',
xmax=None, legend_alpha=1.0):
"""
Rainflow counts from csv files.
This supports multiple features as separate columns.
Parameters
----------
    input_csv : str
        path to a CSV file laid out as described below
    casenames : sequence of (str, int, int)
        (case_name, irow_start, irow_stop) tuples used to split the rows
        into named cases
features : dict
key : int; column id to parse
value : str; name
xmax : float
the max value for the x (cycle) axis; helps to change the legend
delimiter : str; default=','
the delimiter for the output file (doesn't apply to input)
legend_alpha : float; default=1.0
the transparency
1=solid
0=transparent
Returns
-------
    None
        CSV files named ``feature<ifeature>_<case_name>_<feature_name>.csv``
        (plus one ``<feature_name>.png`` plot per feature) are written to the
        current directory; nothing is returned
Input_csv
---------
# name1_stress, name2_stress, ...
0.00, 0.0 # case 0 - line 1
20.0, 1.0 # case 1
50.0, 2.0 # case 2
etc.
0.00, 0.0 # case 0
casenames = (
# (casename, irow_start, irow_stop)
('normal', 0, 62),
('impulse', 63, 65),
etc.
)
features = { # the indicies are column numbers
0 : 'fillet',
1 : 'groove',
}
features = ['feature1', 'feature2']
so we get:
feature0_normal_fillet.csv
feature0_impulse_fillet.csv
feature1_normal_groove.csv
feature1_impulse_groove.csv
We'll also get corresponding png files. of the form:
fillet.png
groove.png
that show our cycling.
"""
import matplotlib.pyplot as plt
A = loadtxt(input_csv, delimiter=',', skiprows=1)
if len(A.shape) == 1:
A = A.reshape(len(A), 1)
icase = 0
for ifeature, feature_name in sorted(iteritems(features)):
plt.figure(ifeature)
legend = []
for case_name, min_index, max_index in casenames:
csv_out = 'feature%i_%s_%s.csv' % (ifeature, case_name, feature_name)
print(csv_out)
stress_case = A[min_index:max_index, ifeature]
            # rainflow() returns (max_stress, min_stress); unpack in that
            # order so the CSV columns match the header written below
            max_stress, min_stress = rainflow(icase, stress_case)
            if len(max_stress) == 0:
                min_stress = [A[min_index, ifeature]]
                max_stress = [A[max_index - 1, ifeature]]
            B = vstack([max_stress, min_stress]).T
            with open(csv_out, 'w') as f:
                f.write('# max_stress%smin_stress\n' % delimiter)
                savetxt(f, B, delimiter=delimiter)
plt.plot(range(min_index, max_index), stress_case)
legend.append(case_name)
icase += 1
# add the legend in the middle of the plot
leg = plt.legend(legend, fancybox=True)
# set the alpha value of the legend: it will be translucent
leg.get_frame().set_alpha(legend_alpha)
plt.title(feature_name)
if xmax:
plt.xlim([0, xmax])
plt.xlabel('Cycle Number')
plt.ylabel('Stress (ksi)')
plt.grid(True)
plt.savefig('%s.png' % feature_name)
#plt.show()
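# See main() below for a runnable end-to-end example: it writes a synthetic
# two-feature stress history to CSV, defines `casenames` and `features`,
# runs rainflow_from_csv() and then removes the temporary input file.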
def main():
input_csv = 'test.csv'
n = 700
n1 = n // 3
    casenames = (
        ('normal', 0, n1),
        ('impulse', n1, n - 1),
    )
import os
from numpy import linspace, sin, cos, tan, vstack
x = linspace(0., 3.14*5, num=n)
y = sin(x) * cos(201 * x)
z = sin(x) * cos(201 * x) * tan(x)
A = vstack([y, z])
    # write a header line so that loadtxt(..., skiprows=1) skips it rather
    # than the first data row
    savetxt(input_csv, A.T, delimiter=',', header='fillet_stress,groove_stress')
features = {
0 : 'fillet',
1 : 'groove',
}
rainflow_from_csv(input_csv, casenames, features,
write_csvs=True, delimiter=',',
xmax=None, legend_alpha=1.0)
os.remove(input_csv)
if __name__ == '__main__':
main()
|
lgpl-3.0
|
artmusic0/theano-learning.part02
|
Myfile_run-py_releasev2/cnn_training_computation.py
|
1
|
7523
|
import os
import sys, getopt
import time
import numpy
import theano
import theano.tensor as T
from sklearn import preprocessing
from cnn import CNN
import pickle as cPickle
from logistic_sgd import LogisticRegression
def fit(data, labels, filename = 'weightsv2.pkl'):
fit_predict(data, labels, filename = filename, action = 'fit')
def predict(test_dataset, filename = 'weightsv2.pkl' ):
return fit_predict(data=[], labels=[], filename= filename, test_datasets=[test_dataset], action = 'predict')[0]
def fit_predict(data, labels, action, filename, test_datasets = [], learning_rate=0.07, n_epochs=1000, nkerns=[20, 50], batch_size=50, seed=8000):
rng = numpy.random.RandomState(seed)
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
index = T.lscalar() # index to a [mini]batch
if action=='fit':
NUM_TRAIN = len(data)
print NUM_TRAIN
print batch_size
if NUM_TRAIN % batch_size != 0: #if the last batch is not full, just don't use the remainder
whole = (NUM_TRAIN / batch_size) * batch_size
data = data[:whole]
NUM_TRAIN = len(data)
print NUM_TRAIN
print batch_size
# random permutation
indices = rng.permutation(NUM_TRAIN)
data, labels = data[indices, :], labels[indices]
        # hold out the last 20 samples of every batch for validation; with
        # batch_size == 500 that is a (480, 20) split, i.e. 96% of the data is
        # used for training and the rest to validate the NN while training
is_train = numpy.array( ([0]* (batch_size - 20) + [1] * 20) * (NUM_TRAIN / batch_size))
        # now we split the dataset into train and validation sets
train_set_x, train_set_y = numpy.array(data[is_train==0]), labels[is_train==0]
valid_set_x, valid_set_y = numpy.array(data[is_train==1]), labels[is_train==1]
# compute number of minibatches
n_train_batches = len(train_set_y) / batch_size
n_valid_batches = len(valid_set_y) / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
epoch = T.scalar()
#index = T.lscalar() # index to a [mini]batch
#x = T.matrix('x') # the data is presented as rasterized images
#y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
# construct the CNN class
classifier = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
train_set_x = theano.shared(numpy.asarray(train_set_x, dtype=theano.config.floatX))
train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y, dtype=theano.config.floatX)), 'int32')
valid_set_x = theano.shared(numpy.asarray(valid_set_x, dtype=theano.config.floatX))
valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y, dtype=theano.config.floatX)), 'int32')
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
cost = classifier.layer3.negative_log_likelihood(y)
# create a list of gradients for all model parameters
grads = T.grad(cost, classifier.params)
# specify how to update the parameters of the model as a list of (variable, update expression) pairs
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(classifier.params, grads)
]
        # compiling a Theano function `train_model` that returns the cost and,
        # at the same time, updates the model parameters based on the rules defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
###############
# TRAIN MODEL #
###############
print '... training'
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
        # here is an example of how to print the current value of a Theano
        # variable: print test_set_x.shape.eval()
# start training
while (epoch < n_epochs):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (epoch) % 5 == 0 and minibatch_index==0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
###############
# PREDICTIONS #
###############
# save and load
f = file(filename, 'wb')
cPickle.dump(classifier.__getstate__(), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
end_time = time.clock()
print >> sys.stderr, ('The code ran for %.2fm' % ((end_time - start_time) / 60.))
if action == 'predict':
# construct the CNN class
classifier_2 = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
print "...."
f = file(filename, 'rb')
classifier_2.__setstate__(cPickle.load(f))
f.close()
RET = []
for it in range(len(test_datasets)):
test_data = test_datasets[it]
N = len(test_data)
test_data = theano.shared(numpy.asarray(test_data, dtype=theano.config.floatX))
# just zeroes
test_labels = T.cast(theano.shared(numpy.asarray(numpy.zeros(batch_size), dtype=theano.config.floatX)), 'int32')
ppm = theano.function([index], classifier_2.layer3.pred_probs(),
givens={
x: test_data[index * batch_size: (index + 1) * batch_size],
y: test_labels
}, on_unused_input='warn')
            # p holds the predicted probabilities; p is 3-dim
            # (n_batches x batch_size x n_classes) and we take the argmax below
p = [ppm(ii) for ii in xrange( N / batch_size)]
#p_one = sum(p, [])
#print p
p = numpy.array(p).reshape((N, 10))
print p
p = numpy.argmax(p, axis=1)
p = p.astype(int)
RET.append(p)
return RET
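# Hedged usage sketch (illustrative; `data`, `labels` and `test_data` are
# assumed to be numpy arrays prepared elsewhere, e.g. flattened images and
# integer class labels):
#
#     fit(data, labels, filename='weightsv2.pkl')      # train + pickle weights
#     predictions = predict(test_data, filename='weightsv2.pkl')
#
# fit() trains the CNN defined in cnn.CNN and pickles its parameters;
# predict() rebuilds the network, restores the pickle and returns the argmax
# class index for each test sample (processed in full batches).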
|
gpl-3.0
|
mwaskom/lyman
|
lyman/workflows/tests/test_preproc.py
|
1
|
27150
|
import os.path as op
import numpy as np
import pandas as pd
from scipy import signal
import nipype
import nibabel as nib
import pytest
from .. import preproc
class TestPreprocWorkflow(object):
def save_image_frames(self, data_list, affine, fstem):
n = len(data_list)
filenames = ["{}{}.nii.gz".format(fstem, i) for i in range(n)]
for frame, fname in zip(data_list, filenames):
nib.save(nib.Nifti1Image(frame, affine), fname)
return filenames
def test_preproc_workflow_creation(self, lyman_info):
info = lyman_info["info"]
subjects = lyman_info["subjects"]
sessions = lyman_info["sessions"]
wf = preproc.define_preproc_workflow(info, subjects, sessions)
# Check basic information about the workflow
assert isinstance(wf, nipype.Workflow)
assert wf.name == "preproc"
assert wf.base_dir == op.join(info.cache_dir, info.experiment_name)
# Check root directory of output
template_out = wf.get_node("template_output")
assert template_out.inputs.base_directory == info.proc_dir
timeseries_out = wf.get_node("timeseries_output")
assert timeseries_out.inputs.base_directory == info.proc_dir
# Check the list of nodes we expect
expected_nodes = ["subject_source", "session_source", "run_source",
"session_input", "run_input",
"estimate_distortions", "finalize_unwarping",
"transform_jacobian",
"fm2anat", "fm2anat_qc",
"sb2fm", "sb2fm_qc",
"ts2sb", "ts2sb_qc",
"combine_premats", "combine_postmats",
"restore_timeseries", "restore_template",
"finalize_timeseries", "finalize_template",
"save_info", "template_output", "timeseries_output"]
expected_nodes.sort()
assert wf.list_node_names() == expected_nodes
def test_preproc_iterables(self, lyman_info):
info = lyman_info["info"]
scan_info = info.scan_info
# -- Test full iterables
iterables = preproc.generate_iterables(
scan_info, "exp_alpha", ["subj01", "subj02"],
)
expected_iterables = (
["subj01", "subj02"],
{"subj01": [("subj01", "sess01"), ("subj01", "sess02")],
"subj02": [("subj02", "sess01")]},
{("subj01", "sess01"):
[("subj01", "sess01", "run01"),
("subj01", "sess01", "run02")],
("subj01", "sess02"):
[("subj01", "sess02", "run01")],
("subj02", "sess01"):
[("subj02", "sess01", "run01"),
("subj02", "sess01", "run02"),
("subj02", "sess01", "run03")]},
)
assert iterables == expected_iterables
# -- Test iterables as set in workflow
wf = preproc.define_preproc_workflow(info, ["subj01", "subj02"], None)
subject_source = wf.get_node("subject_source")
assert subject_source.iterables == ("subject", iterables[0])
session_source = wf.get_node("session_source")
assert session_source.iterables == ("session", iterables[1])
run_source = wf.get_node("run_source")
assert run_source.iterables == ("run", iterables[2])
# -- Test single subject
iterables = preproc.generate_iterables(
scan_info, "exp_alpha", ["subj01"],
)
expected_iterables = (
["subj01"],
{"subj01": [("subj01", "sess01"), ("subj01", "sess02")]},
{("subj01", "sess01"):
[("subj01", "sess01", "run01"),
("subj01", "sess01", "run02")],
("subj01", "sess02"):
[("subj01", "sess02", "run01")]}
)
assert iterables == expected_iterables
# -- Test different experiment
iterables = preproc.generate_iterables(
scan_info, "exp_beta", ["subj01", "subj02"],
)
expected_iterables = (
["subj01"],
{"subj01": [("subj01", "sess02")]},
{("subj01", "sess02"):
[("subj01", "sess02", "run01"),
("subj01", "sess02", "run02"),
("subj01", "sess02", "run03")]},
)
assert iterables == expected_iterables
# -- Test single subject, single session
iterables = preproc.generate_iterables(
scan_info, "exp_alpha", ["subj01"], ["sess02"],
)
expected_iterables = (
["subj01"],
{"subj01": [("subj01", "sess02")]},
{("subj01", "sess02"):
[("subj01", "sess02", "run01")]},
)
assert iterables == expected_iterables
def test_run_input(self, execdir, template):
random_seed = sum(map(ord, "run_input"))
rs = np.random.RandomState(random_seed)
# --- Generate random test data
subject = template["subject"]
session, run = "sess01", "run01"
run_tuple = subject, session, run
exp_name = template["info"].experiment_name
sb_template = template["info"].sb_template
ts_template = template["info"].ts_template
crop_frames = 2
affine = np.array([[-2, 0, 0, 10],
[0, -2, -1, 10],
[0, 1, 2, 5],
[0, 0, 0, 1]])
func_dir = template["data_dir"].join(subject).join("func")
shape = 12, 8, 4
n_frames = 10
keys = dict(experiment=exp_name, session=session, run=run)
sb_data = rs.randint(10, 20, shape).astype(np.int16)
sb_file = str(func_dir.join(sb_template.format(**keys)))
nib.save(nib.Nifti1Image(sb_data, affine), sb_file)
ts_data = rs.normal(10, 20, shape + (n_frames,))
ts_file = str(func_dir.join(ts_template.format(**keys)))
nib.save(nib.Nifti1Image(ts_data, affine), ts_file)
# --- Run the interface
out = preproc.RunInput(
run=run_tuple,
data_dir=str(template["data_dir"]),
proc_dir=str(template["proc_dir"]),
experiment=exp_name,
sb_template=sb_template,
ts_template=template["info"].ts_template,
crop_frames=crop_frames,
).run().outputs
# --- Test the outputs
assert out.run_tuple == run_tuple
assert out.subject == subject
assert out.session == session
assert out.run == run
# Test output paths
framedir = execdir.join("frames")
ts_frames = [str(framedir.join("frame{:04d}.nii.gz".format(i)))
for i in range(n_frames - crop_frames)]
assert out.ts_file == execdir.join("ts.nii.gz")
assert out.sb_file == execdir.join("sb.nii.gz")
assert out.ts_plot == execdir.join("raw.gif")
assert out.ts_frames == ts_frames
assert out.reg_file == template["reg_file"]
assert out.seg_file == template["seg_file"]
assert out.anat_file == template["anat_file"]
assert out.mask_file == template["mask_file"]
# Test the output timeseries
std_affine = np.array([[2, 0, 0, -12],
[0, 2, -1, -4],
[0, -1, 2, 12],
[0, 0, 0, 1]])
ts_img_out = nib.load(out.ts_file)
assert np.array_equal(ts_img_out.affine, std_affine)
assert ts_img_out.header.get_data_dtype() == np.dtype(np.float32)
ts_data_out = ts_img_out.get_fdata()
ts_data = ts_data[::-1, ::-1, :, crop_frames:].astype(np.float32)
assert np.array_equal(ts_data_out, ts_data)
for i, frame_fname in enumerate(out.ts_frames):
frame_data = nib.load(frame_fname).get_fdata()
assert np.array_equal(frame_data, ts_data[..., i])
# Test that the qc files exist
assert op.exists(out.ts_plot)
def test_session_input(self, execdir, template):
random_seed = sum(map(ord, "session_input"))
rs = np.random.RandomState(random_seed)
subject = template["subject"]
session = "sess01"
session_tuple = subject, session
fm_template = template["info"].fm_template
phase_encoding = template["info"].phase_encoding
func_dir = template["data_dir"].join(subject).join("func")
shape = (12, 8, 4)
n_frames = 3
affine = np.array([[-2, 0, 0, 10],
[0, -2, -1, 10],
[0, 1, 2, 5],
[0, 0, 0, 1]])
fieldmap_data = []
fieldmap_files = []
for encoding in [phase_encoding, phase_encoding[::-1]]:
fm_keys = dict(session=session, encoding=encoding)
fname = str(func_dir.join(fm_template.format(**fm_keys)))
data = rs.randint(10, 25, shape + (n_frames,)).astype(np.int16)
fieldmap_data.append(data)
fieldmap_files.append(fname)
nib.save(nib.Nifti1Image(data, affine), fname)
# --- Run the interface
out = preproc.SessionInput(
session=session_tuple,
data_dir=str(template["data_dir"]),
proc_dir=str(template["proc_dir"]),
fm_template=fm_template,
phase_encoding=phase_encoding,
).run().outputs
# --- Test the outputs
assert out.session_tuple == session_tuple
assert out.subject == subject
assert out.session == session
# Test the output paths
frame_template = "fieldmap_{:02d}.nii.gz"
out_frames = [execdir.join(frame_template.format(i))
for i in range(n_frames * 2)]
assert out.fm_file == execdir.join("fieldmap.nii.gz")
assert out.fm_frames == out_frames
assert out.reg_file == template["reg_file"]
assert out.seg_file == template["seg_file"]
assert out.anat_file == template["anat_file"]
assert out.mask_file == template["mask_file"]
# Test the output images
std_affine = np.array([[2, 0, 0, -12],
[0, 2, -1, -4],
[0, -1, 2, 12],
[0, 0, 0, 1]])
out_fm_img = nib.load(out.fm_file)
assert np.array_equal(out_fm_img.affine, std_affine)
fm_data = np.concatenate(fieldmap_data,
axis=-1).astype(np.float32)[::-1, ::-1]
fm_data_out = out_fm_img.get_fdata()
assert np.array_equal(fm_data_out, fm_data)
for i, frame in enumerate(out_frames):
frame_data_out = nib.load(str(frame)).get_fdata()
assert np.array_equal(frame_data_out, fm_data[..., i])
# Test the output phase encoding information
phase_encode_codes = ["y"] * n_frames + ["y-"] * n_frames
assert out.phase_encoding == phase_encode_codes
assert out.readout_times == [1] * (n_frames * 2)
# Test reversed phase encoding
phase_encoding = phase_encoding[::-1]
out = preproc.SessionInput(
session=session_tuple,
data_dir=str(template["data_dir"]),
proc_dir=str(template["proc_dir"]),
fm_template=fm_template,
phase_encoding=phase_encoding,
).run().outputs
# Test the output images
fm_data = np.concatenate(fieldmap_data[::-1],
axis=-1).astype(np.float32)[::-1, ::-1]
fm_data_out = nib.load(out.fm_file).get_fdata()
assert np.array_equal(fm_data_out, fm_data)
for i, frame in enumerate(out_frames):
frame_data_out = nib.load(str(frame)).get_fdata()
assert np.array_equal(frame_data_out, fm_data[..., i])
def test_combine_linear_transforms(self, execdir):
a, b, c, d = np.random.randn(4, 4, 4)
np.savetxt("ts2sb.mat", a)
np.savetxt("sb2fm.mat", b)
np.savetxt("fm2anat.mat", c)
np.savetxt("anat2temp.mat", d)
ab = np.dot(b, a)
cd = np.dot(d, c)
ifc = preproc.CombineLinearTransforms(ts2sb_file="ts2sb.mat",
sb2fm_file="sb2fm.mat",
fm2anat_file="fm2anat.mat",
anat2temp_file="anat2temp.mat")
out = ifc.run().outputs
assert np.loadtxt(out.ts2fm_file) == pytest.approx(ab)
assert np.loadtxt(out.fm2temp_file) == pytest.approx(cd)
def test_finalize_unwarping(self, execdir):
# --- Generate random image data
random_seed = sum(map(ord, "finalize_unwarping"))
rs = np.random.RandomState(random_seed)
shape = 12, 8, 4
n_frames = 6
shape_4d = shape + (n_frames,)
affine = np.eye(4)
affine[:3, :3] *= 2
phase_encoding = ["y+"] * 3 + ["y-"] * 3
session_tuple = "subj01", "sess01"
raw_data = rs.uniform(0, 1, shape_4d)
raw_file = "raw_frames.nii.gz"
nib.save(nib.Nifti1Image(raw_data, affine), raw_file)
corrected_data = rs.uniform(0, 10, shape_4d)
corrected_file = "corrected_frames.nii.gz"
nib.save(nib.Nifti1Image(corrected_data, affine), corrected_file)
warp_shape = shape + (3,)
warp_data = [rs.uniform(-8, 8, warp_shape) for _ in range(n_frames)]
warp_files = self.save_image_frames(warp_data, affine, "warp")
jacobian_data = [rs.uniform(.5, 1.5, shape) for _ in range(n_frames)]
jacobian_files = self.save_image_frames(jacobian_data, affine, "jac")
# --- Run the interface
out = preproc.FinalizeUnwarping(
raw_file=raw_file,
corrected_file=corrected_file,
warp_files=warp_files,
jacobian_files=jacobian_files,
phase_encoding=phase_encoding,
session_tuple=session_tuple,
).run().outputs
# --- Test outputs
# Test output filenames
assert out.raw_file == execdir.join("raw.nii.gz")
assert out.corrected_file == execdir.join("func.nii.gz")
assert out.warp_file == execdir.join("warp.nii.gz")
assert out.mask_file == execdir.join("warp_mask.nii.gz")
assert out.jacobian_file == execdir.join("jacobian.nii.gz")
assert out.warp_plot == execdir.join("warp.png")
assert out.unwarp_gif == execdir.join("unwarp.gif")
# Test that the right frame of the raw image is selected
raw_data_out = nib.load(out.raw_file).get_fdata()
assert np.array_equal(raw_data_out, raw_data[..., 0])
# Test that the corrected image is a temporal average
corrected_data = corrected_data.mean(axis=-1)
corrected_data_out = nib.load(out.corrected_file).get_fdata()
assert corrected_data_out == pytest.approx(corrected_data)
# Test that the warp image has the right geometry
warp_img_out = nib.load(out.warp_file)
assert np.array_equal(warp_img_out.affine, affine)
# Test that the warp image is the right frame
warp_data_out = warp_img_out.get_fdata()
assert np.array_equal(warp_data_out, warp_data[0])
# Test the warp mask
warp_mask = (np.abs(warp_data[0][..., 1]) < 4).astype(np.int)
warp_mask_out = nib.load(out.mask_file).get_fdata().astype(np.int)
assert np.array_equal(warp_mask_out, warp_mask)
# Test that the jacobians have the same data but new geometry
jacobian_data = np.stack(jacobian_data, axis=-1)
jacobian_img_out = nib.load(out.jacobian_file)
jacobian_data_out = jacobian_img_out.get_fdata()
assert np.array_equal(jacobian_img_out.affine, affine)
assert np.array_equal(jacobian_data_out, jacobian_data)
# Test that qc plots exist
assert op.exists(out.warp_plot)
assert op.exists(out.unwarp_gif)
def test_finalize_timeseries(self, execdir, template):
# --- Generate input data
experiment = "exp_alpha"
run_tuple = subject, session, run = "subj01", "sess01", "run01"
random_seed = sum(map(ord, "finalize_timeseries"))
rs = np.random.RandomState(random_seed)
shape = 12, 8, 4
n_tp = 10
affine = np.eye(4)
affine[:3, :3] *= 2
target = 100
fov = np.arange(np.product(shape)).reshape(shape) != 11
in_data = [rs.normal(500, 10, shape) * fov for _ in range(n_tp)]
in_files = self.save_image_frames(in_data, affine, "func")
jacobian_data = rs.uniform(.5, 1.5, shape + (6,))
jacobian_file = "jacobian.nii.gz"
nib.save(nib.Nifti1Image(jacobian_data, affine), jacobian_file)
mc_data = rs.normal(0, 1, (n_tp, 6))
mc_file = "mc.txt"
np.savetxt(mc_file, mc_data)
# --- Run the interface
out = preproc.FinalizeTimeseries(
experiment=experiment,
run_tuple=run_tuple,
in_files=in_files,
jacobian_file=jacobian_file,
anat_file=template["anat_file"],
seg_file=template["seg_file"],
mask_file=template["mask_file"],
mc_file=mc_file,
).run().outputs
# --- Test the outputs
# Test output filenames
assert out.out_file == execdir.join("func.nii.gz")
assert out.out_gif == execdir.join("func.gif")
assert out.out_png == execdir.join("func.png")
assert out.mean_file == execdir.join("mean.nii.gz")
assert out.mean_plot == execdir.join("mean.png")
assert out.tsnr_file == execdir.join("tsnr.nii.gz")
assert out.tsnr_plot == execdir.join("tsnr.png")
assert out.mask_file == execdir.join("mask.nii.gz")
assert out.mask_plot == execdir.join("mask.png")
assert out.noise_file == execdir.join("noise.nii.gz")
assert out.noise_plot == execdir.join("noise.png")
assert out.mc_file == execdir.join("mc.csv")
# Test the output path
output_path = op.join(subject, experiment, "timeseries",
"{}_{}".format(session, run))
assert out.output_path == output_path
# Test the output timeseries
out_img_out = nib.load(out.out_file)
out_data_out = out_img_out.get_fdata()
mask = nib.load(template["mask_file"]).get_fdata()
func_mask = mask.astype(np.bool) & fov
out_data = np.stack(in_data, axis=-1)
out_data *= jacobian_data[..., [0]]
out_data *= np.expand_dims(func_mask, -1)
out_data *= target / out_data[func_mask].mean()
mask_mean = out_data[func_mask].mean(axis=-1, keepdims=True)
out_data[func_mask] = signal.detrend(out_data[func_mask]) + mask_mean
assert np.array_equal(out_img_out.affine, affine)
assert out_data_out == pytest.approx(out_data)
assert out_data_out[func_mask].mean() == pytest.approx(target)
# Test the output mask
mask_data_out = nib.load(out.mask_file).get_fdata()
assert np.array_equal(mask_data_out, func_mask.astype(np.float))
# Test the output temporal statistics
mean_out = nib.load(out.mean_file).get_fdata()
tsnr_out = nib.load(out.tsnr_file).get_fdata()
with np.errstate(all="ignore"):
mean = out_data.mean(axis=-1)
tsnr = mean / out_data.std(axis=-1)
tsnr[~func_mask] = 0
assert mean_out == pytest.approx(mean)
assert tsnr_out == pytest.approx(tsnr)
# Test the output motion correction data
mc_cols = ["rot_x", "rot_y", "rot_z",
"trans_x", "trans_y", "trans_z"]
mc_out = pd.read_csv(out.mc_file)
assert mc_out.columns.tolist() == mc_cols
assert mc_out.values == pytest.approx(mc_data)
# Test that the qc files exist
assert op.exists(out.out_gif)
assert op.exists(out.out_png)
assert op.exists(out.mean_plot)
assert op.exists(out.tsnr_plot)
assert op.exists(out.mask_plot)
assert op.exists(out.noise_plot)
def test_finalize_template(self, execdir, template):
# --- Generate input data
experiment = "exp_alpha"
session_tuple = subject, session = "subj01", "sess01"
random_seed = sum(map(ord, "finalize_template"))
rs = np.random.RandomState(random_seed)
shape = 12, 8, 4
n_frames = 6
n_runs = 4
affine = np.eye(4)
affine[:3, :3] *= 2
target = 100
in_data = [rs.normal(500, 10, shape) for _ in range(n_frames)]
in_files = self.save_image_frames(in_data, affine, "func")
mask_data = [rs.choice([0, 1], shape, True, [.1, .9])
for _ in range(n_runs)]
mask_files = self.save_image_frames(mask_data, affine, "mask")
jacobian_data = rs.uniform(.5, 1.5, shape + (n_frames,))
jacobian_file = "jacobian.nii.gz"
nib.save(nib.Nifti1Image(jacobian_data, affine), jacobian_file)
mean_data = [rs.normal(100, 5, shape) for _ in range(n_runs)]
mean_files = self.save_image_frames(mean_data, affine, "mean")
tsnr_data = [rs.normal(100, 5, shape) for _ in range(n_runs)]
tsnr_files = self.save_image_frames(tsnr_data, affine, "tsnr")
noise_data = [rs.choice([0, 1], shape, True, [.95, .05])
for _ in range(n_runs)]
noise_files = self.save_image_frames(noise_data, affine, "noise")
# --- Run the interface
out = preproc.FinalizeTemplate(
session_tuple=session_tuple,
experiment=experiment,
in_files=in_files,
seg_file=template["seg_file"],
anat_file=template["anat_file"],
jacobian_file=jacobian_file,
mask_files=mask_files,
mean_files=mean_files,
tsnr_files=tsnr_files,
noise_files=noise_files,
).run().outputs
# --- Test the outputs
# Test output filenames
assert out.out_file == execdir.join("func.nii.gz")
assert out.out_plot == execdir.join("func.png")
assert out.mask_file == execdir.join("mask.nii.gz")
assert out.mask_plot == execdir.join("mask.png")
assert out.noise_file == execdir.join("noise.nii.gz")
assert out.noise_plot == execdir.join("noise.png")
assert out.mean_file == execdir.join("mean.nii.gz")
assert out.mean_plot == execdir.join("mean.png")
assert out.tsnr_file == execdir.join("tsnr.nii.gz")
assert out.tsnr_plot == execdir.join("tsnr.png")
# Test the output path
output_path = op.join(subject, experiment, "template", session)
assert out.output_path == output_path
# Test the mask conjunction
mask = np.all(mask_data, axis=0)
mask_data_out = nib.load(out.mask_file).get_fdata()
assert np.array_equal(mask_data_out, mask.astype(np.float))
# Test the final template
out_data_out = nib.load(out.out_file).get_fdata()
out_data = np.stack(in_data, axis=-1) * jacobian_data
out_data[mask] *= target / out_data[mask].mean(axis=0, keepdims=True)
out_data = out_data.mean(axis=-1) * mask
assert np.array_equal(out_data_out, out_data)
# Test the noise mask union
noise_data_out = nib.load(out.noise_file).get_fdata()
noise_data = np.any(noise_data, axis=0).astype(np.float)
assert np.array_equal(noise_data_out, noise_data)
# Test the average mean image
mean_data_out = nib.load(out.mean_file).get_fdata()
mean_data = np.mean(mean_data, axis=0) * mask
assert np.array_equal(mean_data_out, mean_data)
# Test the average tsnr image
tsnr_data_out = nib.load(out.tsnr_file).get_fdata()
tsnr_data = np.mean(tsnr_data, axis=0) * mask
assert np.array_equal(tsnr_data_out, tsnr_data)
# Test that the qc images exist
assert op.exists(out.out_plot)
assert op.exists(out.mask_plot)
assert op.exists(out.mean_plot)
assert op.exists(out.tsnr_plot)
assert op.exists(out.noise_plot)
def test_realignment_report(self, execdir):
target_data = np.random.uniform(0, 100, (12, 8, 4))
target_file = "target.nii.gz"
nib.save(nib.Nifti1Image(target_data, np.eye(4)), target_file)
mc_data = np.random.normal(0, 1, (20, 6))
mc_file = "mc.txt"
np.savetxt(mc_file, mc_data)
run_tuple = "subj01", "sess01", "run01"
out = preproc.RealignmentReport(
target_file=target_file,
realign_params=mc_file,
run_tuple=run_tuple,
).run().outputs
assert out.params_plot == execdir.join("mc_params.png")
assert out.target_plot == execdir.join("mc_target.png")
assert op.exists(out.params_plot)
assert op.exists(out.target_plot)
def test_anat_reg_report(self, execdir):
subject_id = "subj01"
session_tuple = subject_id, "sess01"
data_dir = execdir.mkdir("data")
mri_dir = data_dir.mkdir(subject_id).mkdir("mri")
shape = (12, 8, 4)
affine = np.eye(4)
cost_file = "cost.txt"
cost_array = np.random.uniform(0, 1, 5)
np.savetxt(cost_file, cost_array)
in_data = np.random.normal(100, 5, shape)
in_file = "func.nii.gz"
nib.save(nib.Nifti1Image(in_data, affine), in_file)
wm_data = np.random.randint(0, 2, shape).astype("uint8")
wm_file = mri_dir.join("wm.mgz")
nib.save(nib.MGHImage(wm_data, affine), str(wm_file))
aseg_data = np.random.randint(0, 5, shape).astype("uint8")
aseg_file = mri_dir.join("aseg.mgz")
nib.save(nib.MGHImage(aseg_data, affine), str(aseg_file))
out = preproc.AnatRegReport(
subject_id=subject_id,
session_tuple=session_tuple,
data_dir=str(data_dir),
in_file=in_file,
cost_file=cost_file,
).run().outputs
assert out.out_file == execdir.join("reg.png")
assert op.exists(out.out_file)
def test_coreg_gif(self, execdir):
in_data = np.random.uniform(0, 100, (12, 8, 4))
in_file = "in.nii.gz"
nib.save(nib.Nifti1Image(in_data, np.eye(4)), in_file)
ref_data = np.random.uniform(0, 100, (12, 8, 4, 3))
ref_file = "ref.nii.gz"
nib.save(nib.Nifti1Image(ref_data, np.eye(4)), ref_file)
out_file = "out.gif"
run_tuple = "subj01", "sess01", "run01"
out = preproc.CoregGIF(
in_file=in_file,
ref_file=ref_file,
out_file=out_file,
run_tuple=run_tuple,
).run().outputs
assert out.out_file == execdir.join(out_file)
assert op.exists(out_file)
|
bsd-3-clause
|
mbarbie1/DeepSlice
|
python/keras_small/test/test_show_process_images.py
|
1
|
3900
|
# -*- coding: utf-8 -*-
"""
Test data augmentation of the small slices data set
Created on Thu Dec 21 14:59:42 2017
@author: mbarbier
"""
#from keras import model
from data_small import loadData
import numpy as np
from keras import backend as K
from module_model_unet import get_unet, preprocess
from module_callbacks import trainCheck
import matplotlib.pyplot as plt
K.set_image_data_format('channels_last') # TF dimension ordering in this code
#img_rows = 384
#img_cols = 384
img_rows = 96
img_cols = 96
n_epochs = 30
import os
from scipy.misc import imsave
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
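# Illustrative usage sketch for deprocess_image (hypothetical demo_* names):
# it expects a channels-first float array of shape (channels, rows, cols) and
# returns a uint8 channels-last image that imsave can write.
def demo_deprocess_example():
    demo_tensor = np.random.randn(3, img_rows, img_cols).astype(np.float32)
    demo_img = deprocess_image(demo_tensor)  # shape (img_rows, img_cols, 3), dtype uint8
    return demo_img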
print('-'*30)
print('Loading train data...')
print('-'*30)
imgs_train, imgs_test, imgs_mask_train_all, imgs_mask_test_all, imgs_id_test, imgs_id_train = loadData( 0.8, 16 )
images = imgs_train#np.expand_dims( imgs_train, axis = 3 )
#images = np.expand_dims( images, axis = 3 )
images = preprocess(images, img_rows, img_cols )
masks_all = imgs_mask_train_all
print('-'*30)
print('Fuse masks to single multi-label image')
print('-'*30)
regionList = [ "cb", "hp", "cx", "th", "mb", "bs" ]
regionIdList = range(1, len(regionList)+1)
masks = np.zeros( masks_all[regionList[0]].shape )
#masks = np.expand_dims( masks, axis = 3 )
masks = preprocess(masks, img_rows, img_cols )
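# NOTE: the actual fusion of masks_all into `masks` (e.g. writing the labels
# from regionIdList into the zero array) is not performed in this script, so
# `masks` stays all-zero when passed to model.fit below.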
print('-'*30)
print('Load unet model')
print('-'*30)
model = get_unet( img_rows, img_cols )
layer_dict = dict([(layer.name, layer) for layer in model.layers])
#print('-'*30)
#print('Load unet model')
#print('-'*30)
#img = images[0]
#img_float = img.astype(np.float32)
#img_float = deprocess_image(img_float)
#layer_name = 'testLayerName'
#filter_index = 0
#imsave('%s_filter_%d.png' % (layer_name, filter_index), img_float)
# Compile model
#model = get_unet( img_rows, img_cols )
#.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
#history = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, verbose=0)
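# NOTE: `flag` used below is assumed to be a configuration object (with an
# `output_dir` attribute) defined elsewhere; it is not created in this script.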
if not os.path.isdir(flag.output_dir):
os.mkdir(flag.output_dir)
show_pred_masks = trainCheck(flag)
history = model.fit(images, masks, batch_size=32, epochs=n_epochs, verbose=1, shuffle=True,
validation_split=0.2, callbacks=[show_pred_masks])
print(history.history.keys())
# summarize training history (metrics and losses)
def showTrainingHistory(history):
plt.figure()
plt.plot(history.history['pixelwise_binary_ce'])
plt.plot(history.history['val_pixelwise_binary_ce'])
plt.title('model pixelwise_binary_ce')
plt.ylabel('pixelwise_binary_ce')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.figure()
plt.plot(history.history['pixelwise_l2_loss'])
plt.plot(history.history['val_pixelwise_l2_loss'])
plt.title('model pixelwise_l2_loss')
plt.ylabel('pixelwise_l2_loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for dice coefficient
plt.figure()
plt.plot(history.history['dice_coef'])
plt.plot(history.history['val_dice_coef'])
plt.title('model dice_coef')
plt.ylabel('dice_coef')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# list all data in history
print(history.history.keys())
showTrainingHistory(history)
|
apache-2.0
|
charanpald/sandbox
|
sandbox/util/Evaluator.py
|
1
|
12323
|
import numpy
from sandbox.util.Parameter import Parameter
#TODO: Test this file
class Evaluator(object):
"""
A class to evaluate machine learning performance.
"""
def __init__(self):
pass
@staticmethod
def evaluateBinary1DLabels(testY, predY):
numEvaluations = 6
evaluations = numpy.zeros(numEvaluations)
evaluations[0] = Evaluator.binaryError(testY, predY)
#evaluations[1] = mlpy.sens(testY, predY)
#evaluations[2] = mlpy.spec(testY, predY)
evaluations[3] = Evaluator.binaryErrorP(testY, predY)
evaluations[4] = Evaluator.binaryErrorN(testY, predY)
evaluations[5] = Evaluator.balancedError(testY, predY)
return evaluations
@staticmethod
def balancedError(testY, predY):
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
return 0.5*(Evaluator.binaryErrorP(testY, predY)+Evaluator.binaryErrorN(testY, predY))
@staticmethod
def weightedRootMeanSqError(testY, predY):
"""
Weighted root mean square error.
"""
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
alpha = 1.0
w = numpy.exp(alpha * testY)
return numpy.linalg.norm((testY - predY)*numpy.sqrt(w))/numpy.sqrt(testY.shape[0])
@staticmethod
def rootMeanSqError(testY, predY):
"""
This is the root mean squared error, sqrt((1/n) * sum_i (predY_i - testY_i)^2), with n = testY.size
"""
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
return numpy.linalg.norm(testY - predY)/numpy.sqrt(testY.size)
@staticmethod
def meanAbsError(testY, predY):
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
return numpy.abs(testY - predY).mean()
@staticmethod
def meanSqError(testY, predY):
"""
This is the mean squared difference between the true and predicted labels
"""
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
return ((testY - predY)**2).mean()
@staticmethod
def evaluateWindowError(D, windowSize, pairIndices):
"""
The input is a matrix D of distances between examples such that
D_ij = d(x_i, x_j). The aim is to match each example to the corresponding
pair based on ranking in order of their distance. An error is
counted if the given item in the pair is not in the window.
"""
if D.shape[0]!=D.shape[1]:
raise ValueError("D must be a square and symmetric matrix")
numExamples = D.shape[0]
numPairs = numExamples/2
error = 0
for i in pairIndices[:, 0]:
windowInds = numpy.argsort(D[i, :])[0:windowSize]
error = error + (windowInds != pairIndices[i, 1]).all()
return float(error)/numPairs
@staticmethod
def binaryError(testY, predY):
"""
Work out the error on a set of -1/+1 labels
"""
Parameter.checkClass(testY, numpy.ndarray)
Parameter.checkClass(predY, numpy.ndarray)
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
error = numpy.sum(testY != predY)/float(predY.shape[0])
return error
@staticmethod
def binaryBootstrapError(testY, predTestY, trainY, predTrainY, weight):
"""
Evaluate an error in conjunction with a bootstrap method by computing
w*testErr + (1-w)*trainErr
"""
Parameter.checkFloat(weight, 0.0, 1.0)
return weight*Evaluator.binaryError(testY, predTestY) + (1-weight)*Evaluator.binaryError(trainY, predTrainY)
@staticmethod
def binaryErrorP(testY, predY):
"""
Work out the error on a set of -1/+1 labels
"""
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
posInds = (testY == 1)
if testY[posInds].shape[0] != 0:
error = numpy.sum(numpy.abs(testY[posInds] - predY[posInds]))/(2.0*testY[posInds].shape[0])
else:
error = 0.0
return error
@staticmethod
def binaryErrorN(testY, predY):
"""
Work out the error on a set of -1/+1 labels
"""
if testY.shape[0] != predY.shape[0]:
raise ValueError("Labels vector much be same dimensions as predicted labels")
negInds = (testY == -1)
if testY[negInds].shape[0] != 0:
error = numpy.sum(numpy.abs(testY[negInds] - predY[negInds]))/(2.0*testY[negInds].shape[0])
else:
error = 0.0
return error
@staticmethod
def auc2(trueY, predY):
return Evaluator.auc(predY, trueY)
@staticmethod
def auc(predY, trueY):
"""
Can be used in conjunction with evaluateCV using the scores, and true
labels. Note the order of parameters.
"""
try:
import sklearn.metrics
except ImportError:
raise
Parameter.checkClass(predY, numpy.ndarray)
Parameter.checkClass(trueY, numpy.ndarray)
if predY.ndim != 1:
raise ValueError("Expecting predY to be 1D")
if trueY.ndim != 1:
raise ValueError("Expecting trueY to be 1D")
if numpy.unique(trueY).shape[0] > 2:
raise ValueError("Found more than two label types in trueY")
if numpy.unique(trueY).shape[0] == 1:
return 0.5
fpr, tpr, threshold = sklearn.metrics.roc_curve(trueY.ravel(), predY.ravel())
return sklearn.metrics.metrics.auc(fpr, tpr)
@staticmethod
def roc(testY, predY):
try:
import sklearn.metrics
except ImportError:
raise
if numpy.unique(testY).shape[0] == 1:
fpr = numpy.array([])
tpr = numpy.array([])
else:
fpr, tpr, threshold = sklearn.metrics.roc_curve(testY.ravel(), predY.ravel())
#Insert 0,0 at the start of fpr and tpr
if fpr[0] != 0.0 or tpr[0] != 0.0:
fpr = numpy.insert(fpr, 0, 0)
tpr = numpy.insert(tpr, 0, 0)
return (fpr, tpr)
@staticmethod
def localAuc(testY, predY, u):
"""
Compute the local AUC measure for a given ROC curve. The parameter u is
the proportion of best instances to use u = P(s(X) > t).
"""
Parameter.checkFloat(u, 0.0, 1.0)
fpr, tpr = Evaluator.roc(testY, predY)
minExampleIndex = int(numpy.floor((predY.shape[0]-1)*u))
minExampleScore = numpy.flipud(numpy.sort(predY))[minExampleIndex]
intersectInd = numpy.searchsorted(numpy.sort(numpy.unique(predY)), minExampleScore)
intersectInd = numpy.unique(predY).shape[0] - intersectInd
alpha = fpr[intersectInd]
beta = tpr[intersectInd]
localAuc = numpy.sum(0.5*numpy.diff(fpr[0:intersectInd])*(tpr[0:max(intersectInd-1, 0)] + tpr[1:intersectInd]))
localAuc += beta*(1-alpha)
return localAuc
@staticmethod
def precisionFromIndLists(testList, predList):
"""
Measure the precision of a predicted list given the true list. The precision is
|relevant items \cap retrieved items| / |retrieved items|. The items of the
lists are indices.
"""
if len(testList) == 0 or len(predList) == 0:
return 0
import sklearn.metrics
n = max(numpy.max(testList), numpy.max(predList))+1
predY = -1*numpy.ones(n)
predY[predList] = 1
testY = -1*numpy.ones(n)
testY[testList] = 1
return sklearn.metrics.precision_score(testY, predY)
@staticmethod
def recallFromIndLists(testList, predList):
"""
Measure the recall of a predicted list given the true list. The recall is
|relevant items \cap retrieved items| / |relevant items|. The items of the
lists are indices.
"""
import sklearn.metrics
n = max(numpy.max(testList), numpy.max(predList))+1
predY = -1*numpy.ones(n)
predY[predList] = 1
testY = -1*numpy.ones(n)
testY[testList] = 1
return sklearn.metrics.recall_score(testY, predY)
@staticmethod
def f1FromIndLists(testList, predList):
"""
Measure the F1 score (the harmonic mean of precision and recall) of a
predicted list given the true list. The items of the
lists are indices.
"""
import sklearn.metrics
n = max(numpy.max(testList), numpy.max(predList))+1
predY = -1*numpy.ones(n)
predY[predList] = 1
testY = -1*numpy.ones(n)
testY[testList] = 1
return sklearn.metrics.f1_score(testY, predY)
@staticmethod
def averagePrecisionFromLists(testList, predList, k=100):
"""
Computes the average precision at k. Borrowed from https://github.com/benhamner/Metrics.
This function computes the average precision at k between two lists of
items.
Parameters
----------
testList : list
A list of elements that are to be predicted (order doesn't matter)
predList : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predList)>k:
predList = predList[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predList):
if p in testList and p not in predList[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not testList:
return 1.0
return score / min(len(testList), k)
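# Worked example for averagePrecisionFromLists: with testList = [1, 2, 3],
# predList = [1, 4, 2] and k = 3, the relevant items 1 and 2 are found at
# ranks 1 and 3, so score = 1/1 + 2/3 = 5/3 and the result is
# (5/3) / min(3, 3) ~= 0.556.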
@staticmethod
def meanAveragePrecisionFromLists(actual, predicted, k=10):
"""
Computes the mean average precision at k.
This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
"""
print(actual, predicted)
return numpy.mean([Evaluator.averagePrecisionFromLists(a,p,k) for a,p in zip(actual, predicted)])
@staticmethod
def ndcg(testY, predY, n):
"""
Compute the Normalised Discounted Cumulative Gain at N. Items in testY
have relevance 1; all other items have relevance 0.
:param testY: A partial list of indices
:param predY: A list of predicted indices
"""
raise ValueError("Method not implemented completely")
testY = testY[0:n]
predY = predY[0:n]
m = max(numpy.max(testY), numpy.max(predY))+1
rel = numpy.zeros(m)
rel[predY] = 1
dcg = rel[0] + rel[1:]/numpy.log2(numpy.arange(2, m+1, dtype=numpy.float))
dcg = dcg.sum()
rel = numpy.zeros(m)
rel[testY] = 1
dcg2 = rel[0] + rel[1:]/numpy.log2(numpy.arange(2, m+1, dtype=numpy.float))
dcg2 = dcg2.sum()
return dcg/dcg2
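# Illustrative usage sketch (hand-computed demo labels, not library fixtures):
if __name__ == "__main__":
    demoTestY = numpy.array([1, -1, 1, -1])
    demoPredY = numpy.array([1, 1, 1, -1])
    print(Evaluator.binaryError(demoTestY, demoPredY))    # 1 of 4 wrong -> 0.25
    print(Evaluator.binaryErrorP(demoTestY, demoPredY))   # all positives right -> 0.0
    print(Evaluator.binaryErrorN(demoTestY, demoPredY))   # half the negatives wrong -> 0.5
    print(Evaluator.balancedError(demoTestY, demoPredY))  # mean of the two -> 0.25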
|
gpl-3.0
|
JosmanPS/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
142
|
17496
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
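# Hand check of the unnormalised probabilities above: the test document has
# Chinese, Japan and Tokyo present and Beijing, Macao and Shanghai absent, so
#   China: 0.75 * (0.8 * 0.2 * 0.2) * (1 - 0.4)**3        = 0.005184
#   Japan: 0.25 * ((2/3) * (2/3) * (2/3)) * (1 - 1/3)**3 ~= 0.0219479
# which are exactly the values in unnorm_predict_proba before normalisation.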
|
bsd-3-clause
|
NelisVerhoef/scikit-learn
|
sklearn/utils/tests/test_utils.py
|
215
|
8100
|
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert indexable data structures to
# numpy arrays with float dtypes and lets them pass through unchanged.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
|
bsd-3-clause
|
datapythonista/pandas
|
asv_bench/benchmarks/arithmetic.py
|
3
|
13271
|
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import (
DataFrame,
Series,
Timestamp,
date_range,
to_timedelta,
)
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
try:
import pandas.tseries.holiday
except ImportError:
pass
class IntFrameWithScalar:
params = [
[np.float64, np.int64],
[2, 3.0, np.int32(4), np.float64(5)],
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
],
]
param_names = ["dtype", "scalar", "op"]
def setup(self, dtype, scalar, op):
arr = np.random.randn(20000, 100)
self.df = DataFrame(arr.astype(dtype))
def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
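# Note: asv expands `params` as a cartesian product, so the class above is run
# once per (dtype, scalar, op) combination, with each combination passed to
# both `setup` and `time_frame_op_with_scalar`.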
class OpWithFillValue:
def setup(self):
# GH#31300
arr = np.arange(10 ** 6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
def time_frame_op_with_fill_value_no_nas(self):
self.df.add(self.df, fill_value=4)
def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
class MixedFrameWithSeriesAxis:
params = [
[
"eq",
"ne",
"lt",
"le",
"ge",
"gt",
"add",
"sub",
"truediv",
"floordiv",
"mul",
"pow",
]
]
param_names = ["opname"]
def setup(self, opname):
arr = np.arange(10 ** 6).reshape(1000, -1)
df = DataFrame(arr)
df["C"] = 1.0
self.df = df
self.ser = df[0]
self.row = df.iloc[0]
def time_frame_op_with_series_axis0(self, opname):
getattr(self.df, opname)(self.ser, axis=0)
def time_frame_op_with_series_axis1(self, opname):
getattr(operator, opname)(self.df, self.ser)
class FrameWithFrameWide:
# Many-columns, mixed dtypes
params = [
[
# GH#32779 has discussion of which operators are included here
operator.add,
operator.floordiv,
operator.gt,
],
[
# (n_rows, n_columns)
(1_000_000, 10),
(100_000, 100),
(10_000, 1000),
(1000, 10_000),
],
]
param_names = ["op", "shape"]
def setup(self, op, shape):
# we choose dtypes so as to make the blocks
# a) not perfectly match between right and left
# b) appreciably bigger than single columns
n_rows, n_cols = shape
if op is operator.floordiv:
# floordiv is much slower than the other operations -> use less data
n_rows = n_rows // 10
# construct dataframe with 2 blocks
arr1 = np.random.randn(n_rows, n_cols // 2).astype("f8")
arr2 = np.random.randn(n_rows, n_cols // 2).astype("f4")
df = pd.concat([DataFrame(arr1), DataFrame(arr2)], axis=1, ignore_index=True)
# should already be the case, but just to be sure
df._consolidate_inplace()
        # TODO: GH#33198 the setting here shouldn't need two steps
arr1 = np.random.randn(n_rows, max(n_cols // 4, 3)).astype("f8")
arr2 = np.random.randn(n_rows, n_cols // 2).astype("i8")
arr3 = np.random.randn(n_rows, n_cols // 4).astype("f8")
df2 = pd.concat(
[DataFrame(arr1), DataFrame(arr2), DataFrame(arr3)],
axis=1,
ignore_index=True,
)
# should already be the case, but just to be sure
df2._consolidate_inplace()
self.left = df
self.right = df2
def time_op_different_blocks(self, op, shape):
# blocks (and dtypes) are not aligned
op(self.left, self.right)
def time_op_same_blocks(self, op, shape):
# blocks (and dtypes) are aligned
op(self.left, self.left)
class Ops:
params = [[True, False], ["default", 1]]
param_names = ["use_numexpr", "threads"]
def setup(self, use_numexpr, threads):
self.df = DataFrame(np.random.randn(20000, 100))
self.df2 = DataFrame(np.random.randn(20000, 100))
if threads != "default":
expr.set_numexpr_threads(threads)
if not use_numexpr:
expr.set_use_numexpr(False)
def time_frame_add(self, use_numexpr, threads):
self.df + self.df2
def time_frame_mult(self, use_numexpr, threads):
self.df * self.df2
def time_frame_multi_and(self, use_numexpr, threads):
self.df[(self.df > 0) & (self.df2 > 0)]
def time_frame_comparison(self, use_numexpr, threads):
self.df > self.df2
def teardown(self, use_numexpr, threads):
expr.set_use_numexpr(True)
expr.set_numexpr_threads()
class Ops2:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
self.df_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.df2_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.s = Series(np.random.randn(N))
# Division
def time_frame_float_div(self):
self.df // self.df2
def time_frame_float_div_by_zero(self):
self.df / 0
def time_frame_float_floor_by_zero(self):
self.df // 0
def time_frame_int_div_by_zero(self):
self.df_int / 0
# Modulo
def time_frame_int_mod(self):
self.df_int % self.df2_int
def time_frame_float_mod(self):
self.df % self.df2
# Dot product
def time_frame_dot(self):
self.df.dot(self.df2)
def time_series_dot(self):
self.s.dot(self.s)
def time_frame_series_dot(self):
self.df.dot(self.s)
class Timeseries:
params = [None, "US/Eastern"]
param_names = ["tz"]
def setup(self, tz):
N = 10 ** 6
halfway = (N // 2) - 1
self.s = Series(date_range("20010101", periods=N, freq="T", tz=tz))
self.ts = self.s[halfway]
self.s2 = Series(date_range("20010101", periods=N, freq="s", tz=tz))
def time_series_timestamp_compare(self, tz):
self.s <= self.ts
def time_timestamp_series_compare(self, tz):
self.ts >= self.s
def time_timestamp_ops_diff(self, tz):
self.s2.diff()
def time_timestamp_ops_diff_with_shift(self, tz):
self.s - self.s.shift()
class IrregularOps:
def setup(self):
N = 10 ** 5
idx = date_range(start="1/1/2000", periods=N, freq="s")
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
def time_add(self):
self.left + self.right
class TimedeltaOps:
def setup(self):
self.td = to_timedelta(np.arange(1000000))
self.ts = Timestamp("2000")
def time_add_td_ts(self):
self.td + self.ts
class CategoricalComparisons:
params = ["__lt__", "__le__", "__eq__", "__ne__", "__ge__", "__gt__"]
param_names = ["op"]
def setup(self, op):
N = 10 ** 5
self.cat = pd.Categorical(list("aabbcd") * N, ordered=True)
def time_categorical_op(self, op):
getattr(self.cat, op)("b")
class IndexArithmetic:
params = ["float", "int"]
param_names = ["dtype"]
def setup(self, dtype):
N = 10 ** 6
indexes = {"int": "makeIntIndex", "float": "makeFloatIndex"}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class NumericInferOps:
# from GH 7332
params = numeric_dtypes
param_names = ["dtype"]
def setup(self, dtype):
N = 5 * 10 ** 5
self.df = DataFrame(
{"A": np.arange(N).astype(dtype), "B": np.arange(N).astype(dtype)}
)
def time_add(self, dtype):
self.df["A"] + self.df["B"]
def time_subtract(self, dtype):
self.df["A"] - self.df["B"]
def time_multiply(self, dtype):
self.df["A"] * self.df["B"]
def time_divide(self, dtype):
self.df["A"] / self.df["B"]
def time_modulo(self, dtype):
self.df["A"] % self.df["B"]
class DateInferOps:
# from GH 7332
def setup_cache(self):
N = 5 * 10 ** 5
df = DataFrame({"datetime64": np.arange(N).astype("datetime64[ms]")})
df["timedelta"] = df["datetime64"] - df["datetime64"]
return df
def time_subtract_datetimes(self, df):
df["datetime64"] - df["datetime64"]
def time_timedelta_plus_datetime(self, df):
df["timedelta"] + df["datetime64"]
def time_add_timedeltas(self, df):
df["timedelta"] + df["timedelta"]
class AddOverflowScalar:
params = [1, -1, 0]
param_names = ["scalar"]
def setup(self, scalar):
N = 10 ** 6
self.arr = np.arange(N)
def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
class AddOverflowArray:
def setup(self):
N = 10 ** 6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
        self.arr_mixed = np.array([1, -1]).repeat(N // 2)
self.arr_nan_1 = np.random.choice([True, False], size=N)
self.arr_nan_2 = np.random.choice([True, False], size=N)
def time_add_overflow_arr_rev(self):
checked_add_with_arr(self.arr, self.arr_rev)
def time_add_overflow_arr_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1)
def time_add_overflow_b_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, b_mask=self.arr_nan_1)
def time_add_overflow_both_arg_nan(self):
checked_add_with_arr(
self.arr, self.arr_mixed, arr_mask=self.arr_nan_1, b_mask=self.arr_nan_2
)
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
# These offsets currently raise a NotImplementedError with .apply_index()
non_apply = [
pd.offsets.Day(),
pd.offsets.BYearEnd(),
pd.offsets.BYearBegin(),
pd.offsets.BQuarterEnd(),
pd.offsets.BQuarterBegin(),
pd.offsets.BMonthEnd(),
pd.offsets.BMonthBegin(),
pd.offsets.CustomBusinessDay(),
pd.offsets.CustomBusinessDay(calendar=hcal),
pd.offsets.CustomBusinessMonthBegin(calendar=hcal),
    pd.offsets.CustomBusinessMonthEnd(calendar=hcal),
]
other_offsets = [
pd.offsets.YearEnd(),
pd.offsets.YearBegin(),
pd.offsets.QuarterEnd(),
pd.offsets.QuarterBegin(),
pd.offsets.MonthEnd(),
pd.offsets.MonthBegin(),
pd.offsets.DateOffset(months=2, days=2),
pd.offsets.BusinessDay(),
pd.offsets.SemiMonthEnd(),
pd.offsets.SemiMonthBegin(),
]
offsets = non_apply + other_offsets
class OffsetArrayArithmetic:
params = offsets
param_names = ["offset"]
def setup(self, offset):
N = 10000
rng = date_range(start="1/1/2000", periods=N, freq="T")
self.rng = rng
self.ser = Series(rng)
def time_add_series_offset(self, offset):
with warnings.catch_warnings(record=True):
self.ser + offset
def time_add_dti_offset(self, offset):
with warnings.catch_warnings(record=True):
self.rng + offset
class ApplyIndex:
params = other_offsets
param_names = ["offset"]
def setup(self, offset):
N = 10000
rng = date_range(start="1/1/2000", periods=N, freq="T")
self.rng = rng
def time_apply_index(self, offset):
self.rng + offset
class BinaryOpsMultiIndex:
params = ["sub", "add", "mul", "div"]
param_names = ["func"]
def setup(self, func):
array = date_range("20200101 00:00", "20200102 0:00", freq="S")
level_0_names = [str(i) for i in range(30)]
index = pd.MultiIndex.from_product([level_0_names, array])
column_names = ["col_1", "col_2"]
self.df = DataFrame(
np.random.rand(len(index), 2), index=index, columns=column_names
)
self.arg_df = DataFrame(
np.random.randint(1, 10, (len(level_0_names), 2)),
index=level_0_names,
columns=column_names,
)
def time_binary_op_multiindex(self, func):
getattr(self.df, func)(self.arg_df, level=0)
from .pandas_vb_common import setup # noqa: F401 isort:skip
|
bsd-3-clause
|
zuku1985/scikit-learn
|
examples/plot_kernel_ridge_regression.py
|
19
|
6269
|
"""
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
plt.plot(train_sizes, -test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, -test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
aewhatley/scikit-learn
|
sklearn/feature_extraction/tests/test_image.py
|
205
|
10378
|
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
    # Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
|
bsd-3-clause
|
google-research/google-research
|
milking_cowmask/data_sources/small_image_data_source.py
|
1
|
11564
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CIFAR-10/CIFAR-100/SVHN input pipeline.
"""
from sklearn.model_selection import StratifiedShuffleSplit
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
HEIGHT = 32
WIDTH = 32
NUM_CHANNELS = 3
def _augment_image(image, xlat=4, flip_lr=True):
"""Augment small image with random crop and h-flip.
Args:
image: image to augment
xlat: random offset range
flip_lr: if True perform random horizontal flip
Returns:
augmented image
"""
if xlat > 0:
# Pad with reflection padding
# (See https://arxiv.org/abs/1605.07146)
# Section 3
image = tf.pad(image, [[xlat, xlat],
[xlat, xlat], [0, 0]], 'REFLECT')
# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.image.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
if flip_lr:
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
return image
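# A minimal usage sketch (added for illustration; the tensor content is
# arbitrary): with the default xlat=4 the image is reflection-padded to
# 40x40, randomly cropped back to HEIGHT x WIDTH and possibly flipped, so the
# output shape equals the input shape.
#
#     img = tf.zeros([HEIGHT, WIDTH, NUM_CHANNELS], dtype=tf.float32)
#     aug = _augment_image(img, xlat=4, flip_lr=True)  # shape (32, 32, 3)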
def _preprocess_train_image(image, mean_rgb, stddev_rgb):
image = tf.cast(image, tf.float32)
image = _augment_image(image)
image = (image - mean_rgb) / stddev_rgb
return image
def _preprocess_eval_image(image, mean_rgb, stddev_rgb):
image = tf.cast(image, tf.float32)
image = (image - mean_rgb) / stddev_rgb
return image
class AbstractSmallImageDataSource(object):
"""Abstract small image data source."""
MEAN_RGB = [0.5 * 255, 0.5 * 255, 0.5 * 255]
STDDEV_RGB = [1.0, 1.0, 1.0]
AUG_CROP_PADDING = 0
AUG_FLIP_LR = False
N_CLASSES = None
TRAIN_IMAGES = None
TEST_IMAGES = None
def __init__(self, n_val, n_sup, train_batch_size, eval_batch_size,
augment_twice, subset_seed=12345, val_seed=131):
"""Constructor.
Args:
n_val: number of validation samples to hold out from training set
n_sup: number of samples for supervised learning
train_batch_size: batch size for training
eval_batch_size: batch_size for evaluation
augment_twice: should unsupervised sample pairs be augmented differently
subset_seed: the random seed used to choose the supervised samples
val_seed: the random seed used to choose the hold out validation samples
Attributes:
n_train: number of training samples
n_sup: number of supervised samples
n_val: number of validation samples
n_test: number of test samples
train_semisup_ds: Semi-supervised training dataset
train_unsup_ds: Unsupervised training dataset
train_sup_ds: Supervised training dataset
val_ds: Validation dataset
test_ds: Test dataset
n_classes: number of classes
"""
mean_rgb = tf.constant(self.MEAN_RGB, shape=[1, 1, 3], dtype=tf.float32)
stddev_rgb = tf.constant(self.STDDEV_RGB, shape=[1, 1, 3],
dtype=tf.float32)
#
# Get data
#
base_train_ds = self._load_train_set()
@tf.function
def get_train():
return next(iter(base_train_ds.batch(self.TRAIN_IMAGES)))
trainval = get_train()
#
# Split dataset into train and validation, if requested
#
if n_val > 0:
train_val_splitter = StratifiedShuffleSplit(
1, test_size=n_val, random_state=val_seed)
train_ndx, val_ndx = next(train_val_splitter.split(
trainval['label'], trainval['label']))
X_train = trainval['image'].numpy()[train_ndx] # pylint: disable=invalid-name
y_train = trainval['label'].numpy()[train_ndx]
X_val = trainval['image'].numpy()[val_ndx] # pylint: disable=invalid-name
y_val = trainval['label'].numpy()[val_ndx]
else:
X_train = trainval['image'].numpy() # pylint: disable=invalid-name
y_train = trainval['label'].numpy()
X_val = None # pylint: disable=invalid-name
y_val = None
train_ds = tf.data.Dataset.from_tensor_slices(
{'image': X_train, 'label': y_train}
).cache()
#
# Select supervised subset
#
if n_sup == -1:
n_sup = self.TRAIN_IMAGES
if n_sup < self.TRAIN_IMAGES:
splitter = StratifiedShuffleSplit(1, test_size=n_sup,
random_state=subset_seed)
_, sup_ndx = next(splitter.split(y_train, y_train))
X_sup = X_train[sup_ndx] # pylint: disable=invalid-name
y_sup = y_train[sup_ndx]
train_sup_ds = tf.data.Dataset.from_tensor_slices(
{'image': X_sup, 'label': y_sup}
).cache()
else:
train_sup_ds = train_ds
X_sup = X_train # pylint: disable=invalid-name
y_sup = y_train
train_unsup_ds = train_ds
train_sup_ds = train_sup_ds.repeat()
train_sup_ds = train_sup_ds.shuffle(16 * train_batch_size)
train_unsup_ds = train_unsup_ds.repeat()
train_unsup_ds = train_unsup_ds.shuffle(16 * train_batch_size)
train_semisup_ds = tf.data.Dataset.zip((train_sup_ds, train_unsup_ds))
# Sample augmentation functions
def _augment_sup(sup_sample):
"""Augment supervised sample."""
sample = {
'sup_image': _preprocess_train_image(
sup_sample['image'], mean_rgb, stddev_rgb),
'sup_label': sup_sample['label'],
}
return sample
def _augment_unsup_once(unsup_sample):
"""Augment unsupervised sample, single augmentation."""
unsup_x0 = _preprocess_train_image(
unsup_sample['image'], mean_rgb, stddev_rgb)
sample = {
'unsup_image0': unsup_x0,
'unsup_image1': unsup_x0,
}
return sample
def _augment_unsup_twice(unsup_sample):
"""Augment unsupervised sample, two augmentations."""
sample = {
'unsup_image0': _preprocess_train_image(
unsup_sample['image'], mean_rgb, stddev_rgb),
'unsup_image1': _preprocess_train_image(
unsup_sample['image'], mean_rgb, stddev_rgb),
}
return sample
def _augment_semisup_once(sup_sample, unsup_sample):
"""Augment semi-supervised sample, single augmentation."""
unsup_x0 = _preprocess_train_image(
unsup_sample['image'], mean_rgb, stddev_rgb)
semisup_sample = {
'sup_image': _preprocess_train_image(
sup_sample['image'], mean_rgb, stddev_rgb),
'sup_label': sup_sample['label'],
'unsup_image0': unsup_x0,
'unsup_image1': unsup_x0,
}
return semisup_sample
def _augment_semisup_twice(sup_sample, unsup_sample):
"""Augment semi-supervised sample, two augmentations."""
semisup_sample = {
'sup_image': _preprocess_train_image(
sup_sample['image'], mean_rgb, stddev_rgb),
'sup_label': sup_sample['label'],
'unsup_image0': _preprocess_train_image(
unsup_sample['image'], mean_rgb, stddev_rgb),
'unsup_image1': _preprocess_train_image(
unsup_sample['image'], mean_rgb, stddev_rgb),
}
return semisup_sample
def _eval_map_fn(x):
"""Pre-process evaluation sample."""
image = _preprocess_eval_image(x['image'], mean_rgb, stddev_rgb)
batch = {'image': image, 'label': x['label']}
return batch
if augment_twice:
train_semisup_ds = train_semisup_ds.map(_augment_semisup_twice,
num_parallel_calls=128)
train_unsup_only_ds = train_unsup_ds.map(_augment_unsup_twice,
num_parallel_calls=128)
else:
train_semisup_ds = train_semisup_ds.map(_augment_semisup_once,
num_parallel_calls=128)
train_unsup_only_ds = train_unsup_ds.map(_augment_unsup_once,
num_parallel_calls=128)
train_sup_only_ds = train_sup_ds.map(_augment_sup,
num_parallel_calls=128)
train_semisup_ds = train_semisup_ds.batch(train_batch_size,
drop_remainder=True)
train_unsup_only_ds = train_unsup_only_ds.batch(train_batch_size,
drop_remainder=True)
train_sup_only_ds = train_sup_only_ds.batch(train_batch_size,
drop_remainder=True)
train_semisup_ds = train_semisup_ds.prefetch(10)
train_unsup_only_ds = train_unsup_only_ds.prefetch(10)
train_sup_only_ds = train_sup_only_ds.prefetch(10)
self.train_semisup_ds = train_semisup_ds
self.train_unsup_ds = train_unsup_only_ds
self.train_sup_ds = train_sup_only_ds
#
# Validation set
#
if n_val > 0:
val_ds = tf.data.Dataset.from_tensor_slices(
{'image': X_val, 'label': y_val}
).cache()
val_ds = val_ds.map(_eval_map_fn, num_parallel_calls=128)
val_ds = val_ds.batch(eval_batch_size)
val_ds = val_ds.repeat()
val_ds = val_ds.prefetch(10)
self.val_ds = val_ds
else:
self.val_ds = None
#
# Test set
#
test_ds = self._load_test_set().cache()
test_ds = test_ds.map(_eval_map_fn, num_parallel_calls=128)
test_ds = test_ds.batch(eval_batch_size)
test_ds = test_ds.repeat()
test_ds = test_ds.prefetch(10)
self.test_ds = test_ds
self.n_train = len(y_train)
self.n_val = n_val
self.n_sup = len(y_sup)
self.n_test = self.TEST_IMAGES
self.n_classes = self.N_CLASSES
def _load_train_set(self):
raise NotImplementedError('Abstract')
def _load_test_set(self):
raise NotImplementedError('Abstract')
class CIFAR10DataSource(AbstractSmallImageDataSource):
"""CIFAR-10 data source."""
TRAIN_IMAGES = 50000
TEST_IMAGES = 10000
N_CLASSES = 10
MEAN_RGB = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255]
STDDEV_RGB = [0.2470 * 255, 0.2435 * 255, 0.2616 * 255]
AUG_CROP_PADDING = 4
AUG_FLIP_LR = True
def _load_train_set(self):
return tfds.load('cifar10', split='train')
def _load_test_set(self):
return tfds.load('cifar10', split='test')
class CIFAR100DataSource(AbstractSmallImageDataSource):
"""CIFAR-100 data source."""
TRAIN_IMAGES = 50000
TEST_IMAGES = 10000
N_CLASSES = 100
MEAN_RGB = [0.5071 * 255, 0.4866 * 255, 0.4409 * 255]
STDDEV_RGB = [0.2673 * 255, 0.2564 * 255, 0.2761 * 255]
AUG_CROP_PADDING = 4
AUG_FLIP_LR = True
def _load_train_set(self):
return tfds.load('cifar100', split='train')
def _load_test_set(self):
return tfds.load('cifar100', split='test')
class SVHNDataSource(AbstractSmallImageDataSource):
"""SVHN data source."""
TRAIN_IMAGES = 73257
TEST_IMAGES = 26032
N_CLASSES = 10
MEAN_RGB = [0.4377 * 255, 0.4438 * 255, 0.4728 * 255]
STDDEV_RGB = [0.1980 * 255, 0.2010 * 255, 0.1970 * 255]
AUG_CROP_PADDING = 4
AUG_FLIP_LR = False # SVHN digits should *not* be flipped
def _load_train_set(self):
return tfds.load('svhn_cropped', split='train')
def _load_test_set(self):
return tfds.load('svhn_cropped', split='test')
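# A minimal usage sketch (added for illustration; the argument values are
# arbitrary, see the AbstractSmallImageDataSource constructor docstring for
# their meaning):
#
#     ds = CIFAR10DataSource(n_val=5000, n_sup=4000, train_batch_size=256,
#                            eval_batch_size=500, augment_twice=True)
#     semisup_batch = next(iter(ds.train_semisup_ds))
#     # keys: 'sup_image', 'sup_label', 'unsup_image0', 'unsup_image1'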
|
apache-2.0
|
ElsevierDev/elsapy
|
elsapy/utils.py
|
1
|
1568
|
# -*- coding: utf-8 -*-
"""An elsapy module that contains decorators and other utilities to make the
project more maintainable.
"""
import pandas as pd
from . import log_util
logger = log_util.get_logger(__name__)
def recast_df(df):
'''Recasts a data frame so that it has proper date fields and a more
useful data structure for URLs'''
int_resp_fields = [
'document-count',
'citedby-count',
]
date_resp_fields = [
'prism:coverDate',
]
# Modify data structure for storing links/URLs in a DF
if 'link' in df.columns:
if '@rel' in df.link[0][0].keys():
# To deal with inconsistency. In some API responses, the link type
# field uses '@rel' as key; in others, it uses '@ref'.
            link_type_key = '@rel'
else:
link_type_key = '@ref'
df['link'] = df.link.apply(
lambda x: dict([(e[link_type_key], e['@href']) for e in x]))
# Recast fields that contain integers from strings to the integer type
for int_field in int_resp_fields:
if int_field in df.columns:
df[int_field] = df[int_field].apply(
int)
# Recast fields that contain datetime from strings to a datetime type
for date_field in date_resp_fields:
if date_field in df.columns:
logger.info("Converting {}".format(date_field))
df[date_field] = df[date_field].apply(
pd.Timestamp)
return df
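# A minimal usage sketch (added for illustration; the values are made up, the
# column names are among the fields handled above):
#
#     raw = pd.DataFrame({'citedby-count': ['12', '3'],
#                         'prism:coverDate': ['2020-01-01', '2021-06-15']})
#     tidy = recast_df(raw)
#     # 'citedby-count' is now int, 'prism:coverDate' is now a pandas Timestamp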
|
bsd-3-clause
|
nok/sklearn-porter
|
sklearn_porter/estimator/classifier/ExtraTreesClassifier/__init__.py
|
1
|
1113
|
# -*- coding: utf-8 -*-
from sklearn_porter.estimator.classifier.RandomForestClassifier \
import RandomForestClassifier
class ExtraTreesClassifier(RandomForestClassifier):
"""
See also
--------
sklearn.ensemble.ExtraTreesClassifier
http://scikit-learn.org/stable/modules/generated/
sklearn.ensemble.ExtraTreesClassifier.html
"""
def __init__(self, estimator, target_language='java',
target_method='predict', **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param estimator : ExtraTreesClassifier
An instance of a trained ExtraTreesClassifier estimator.
:param target_language : string, default: 'java'
The target programming language.
:param target_method : string, default: 'predict'
The target method of the estimator.
"""
super(ExtraTreesClassifier, self).__init__(
estimator, target_language=target_language,
target_method=target_method, **kwargs)
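# A minimal usage sketch (added for illustration; the actual transpilation is
# performed by methods inherited from the RandomForestClassifier porter, whose
# export API may differ between sklearn-porter versions):
#
#     from sklearn.ensemble import ExtraTreesClassifier as SkExtraTrees
#     clf = SkExtraTrees(n_estimators=10).fit(X_train, y_train)
#     porter = ExtraTreesClassifier(clf, target_language='java',
#                                   target_method='predict')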
|
mit
|