repo_name (string, 6-92 chars) | path (string, 4-191 chars) | copies (322 distinct values) | size (string, 4-6 chars) | content (string, 821-753k chars) | license (15 distinct values)
---|---|---|---|---|---|
michael-pacheco/dota2-predictor | training/query.py | 2 | 7404 | """ Module responsible for querying the result of a game """
import operator
import os
import logging
import numpy as np
from os import listdir
from sklearn.externals import joblib
from preprocessing.augmenter import augment_with_advantages
from tools.metadata import get_hero_dict, get_last_patch
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _query_missing(model,
scaler,
radiant_heroes,
dire_heroes,
synergies,
counters,
similarities,
heroes_released):
""" Query the best missing hero that can be picked given 4 heroes in one
team and 5 heroes in the other.
Args:
model: estimator that has fitted the data
scaler: the scaler used for fitting the data
radiant_heroes: list of hero IDs from radiant team
dire_heroes: list of hero IDs from dire team
synergies: matrix defining the synergy scores between heroes
counters: matrix defining the counter scores between heroes
similarities: matrix defining similarities between heroes
heroes_released: number of heroes released in the queried patch
Returns:
list of variable length containing hero suggestions
"""
all_heroes = radiant_heroes + dire_heroes
base_similarity_radiant = 0
base_similarity_dire = 0
radiant = len(radiant_heroes) == 4
for i in range(4):
for j in range(4):
if i > j:
base_similarity_radiant += similarities[radiant_heroes[i], radiant_heroes[j]]
base_similarity_dire += similarities[dire_heroes[i], dire_heroes[j]]
query_base = np.zeros((heroes_released, 2 * heroes_released + 3))
for i in range(heroes_released):
if radiant:
radiant_heroes.append(i + 1)
else:
dire_heroes.append(i + 1)
for j in range(5):
query_base[i][radiant_heroes[j] - 1] = 1
query_base[i][dire_heroes[j] - 1 + heroes_released] = 1
query_base[i][-3:] = augment_with_advantages(synergies,
counters,
radiant_heroes,
dire_heroes)
if radiant:
del radiant_heroes[-1]
else:
del dire_heroes[-1]
if radiant:
probabilities = model.predict_proba(scaler.transform(query_base))[:, 1]
else:
probabilities = model.predict_proba(scaler.transform(query_base))[:, 0]
heroes_dict = get_hero_dict()
similarities_list = []
results_dict = {}
for i, prob in enumerate(probabilities):
if i + 1 not in all_heroes and i != 23:
if radiant:
similarity_new = base_similarity_radiant
for j in range(4):
similarity_new += similarities[i + 1][radiant_heroes[j]]
similarities_list.append(similarity_new)
else:
similarity_new = base_similarity_dire
for j in range(4):
similarity_new += similarities[i + 1][dire_heroes[j]]
similarities_list.append(similarity_new)
results_dict[heroes_dict[i + 1]] = (prob, similarity_new)
results_list = sorted(results_dict.items(), key=operator.itemgetter(1), reverse=True)
similarities_list.sort()
    max_similarity_allowed = similarities_list[len(similarities_list) // 4]
filtered_list = [x for x in results_list if x[1][1] < max_similarity_allowed]
return filtered_list
def _query_full(model,
scaler,
radiant_heroes,
dire_heroes,
synergies,
counters,
heroes_released):
""" Query the result of a game when both teams have their line-ups
finished.
Args:
model: estimator that has fitted the data
scaler: the scaler used for fitting the data
radiant_heroes: list of hero IDs from radiant team
dire_heroes: list of hero IDs from dire team
synergies: matrix defining the synergy scores between heroes
counters: matrix defining the counter scores between heroes
heroes_released: number of heroes released in the queried patch
Returns:
string with info about the predicted winner team
"""
features = np.zeros(2 * heroes_released + 3)
for i in range(5):
features[radiant_heroes[i] - 1] = 1
features[dire_heroes[i] - 1 + heroes_released] = 1
extra_data = augment_with_advantages(synergies, counters, radiant_heroes, dire_heroes)
features[-3:] = extra_data
features_reshaped = features.reshape(1, -1)
features_final = scaler.transform(features_reshaped)
    probability = model.predict_proba(features_final)[0, 1] * 100
if probability > 50:
return "Radiant has %.3f%% chance" % probability
else:
return "Dire has %.3f%% chance" % (100 - probability)
def query(mmr,
radiant_heroes,
dire_heroes,
synergies=None,
counters=None,
similarities=None):
if similarities is None:
sims = np.loadtxt('pretrained/similarities_all.csv')
else:
sims = np.loadtxt(similarities)
if counters is None:
cnts = np.loadtxt('pretrained/counters_all.csv')
else:
cnts = np.loadtxt(counters)
if synergies is None:
syns = np.loadtxt('pretrained/synergies_all.csv')
else:
syns = np.loadtxt(synergies)
if mmr < 0 or mmr > 10000:
logger.error("MMR should be a number between 0 and 10000")
return
if mmr < 2000:
model_dict = joblib.load(os.path.join("pretrained", "2000-.pkl"))
logger.info("Using 0-2000 MMR model")
elif mmr > 5000:
model_dict = joblib.load(os.path.join("pretrained", "5000+.pkl"))
logger.info("Using 5000-10000 MMR model")
else:
file_list = [int(valid_file[:4]) for valid_file in listdir('pretrained')
if '.pkl' in valid_file]
file_list.sort()
min_distance = 10000
final_mmr = -1000
for model_mmr in file_list:
if abs(mmr - model_mmr) < min_distance:
min_distance = abs(mmr - model_mmr)
final_mmr = model_mmr
logger.info("Using closest model available: %d MMR model", final_mmr)
model_dict = joblib.load(os.path.join("pretrained", str(final_mmr) + ".pkl"))
scaler = model_dict['scaler']
model = model_dict['model']
last_patch_info = get_last_patch()
heroes_released = last_patch_info['heroes_released']
if len(radiant_heroes) + len(dire_heroes) == 10:
return _query_full(model,
scaler,
radiant_heroes,
dire_heroes,
syns,
cnts,
heroes_released)
return _query_missing(model,
scaler,
radiant_heroes,
dire_heroes,
syns,
cnts,
sims,
heroes_released)
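# Usage sketch (not part of the original module): the hero IDs below are
# placeholders, and the calls assume the pretrained/*.pkl models and the
# pretrained/*_all.csv matrices referenced above exist on disk.
if __name__ == '__main__':
    # Full draft (5 vs 5): query() routes to _query_full and returns a
    # "<team> has X% chance" string.
    print(query(4000, [1, 2, 3, 4, 5], [6, 7, 8, 9, 10]))
    # Partial draft (4 vs 5): query() routes to _query_missing and returns
    # a list of suggested last picks for the incomplete team.
    print(query(4000, [1, 2, 3, 4], [6, 7, 8, 9, 10]))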
| mit |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/backends/qt4_compat.py | 12 | 3172 | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
import os
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
ETS = dict(pyqt=QT_API_PYQTv2, pyside=QT_API_PYSIDE)
# If the ETS QT_API environment variable is set, use it. Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.
QT_API_ENV = os.environ.get('QT_API')
if QT_API_ENV is not None:
try:
QT_API = ETS[QT_API_ENV]
except KeyError:
raise RuntimeError(
'Unrecognized environment variable %r, valid values are: %r or %r' %
(QT_API_ENV, 'pyqt', 'pyside'))
else:
# No ETS environment, so use rcParams.
QT_API = rcParams['backend.qt4']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2):
import sip
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond+res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond+res, 'helpful')
from PyQt4 import QtCore, QtGui
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
        QtCore.Slot = QtCore.pyqtSignature  # Not a perfect match but
# works in simple cases
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
try :
if sip.getapi("QString") > 1 :
# Use new getSaveFileNameAndFilter()
_get_save = QtGui.QFileDialog.getSaveFileNameAndFilter
else :
# Use old getSaveFileName()
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
except (AttributeError, KeyError) :
# call to getapi() can fail in older versions of sip
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
else: # can only be pyside
from PySide import QtCore, QtGui, __version__, __version_info__
if __version_info__ < (1,0,3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_get_save = QtGui.QFileDialog.getSaveFileName
if _getSaveFileName is None:
def _getSaveFileName(self, msg, start, filters, selectedFilter):
return _get_save(self, msg, start, filters, selectedFilter)[0]
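# Illustrative sketch (hypothetical names, not part of this backend): the
# wrapper above exists because PyQt4's getSaveFileNameAndFilter() and
# PySide's getSaveFileName() return a (filename, selected_filter) tuple,
# while callers of _getSaveFileName() expect only the filename.  A stand-in
# "native dialog" makes the pattern visible without Qt installed:
def _demo_native_dialog(parent, msg, start, filters, selectedFilter):
    # Mimics the tuple-returning variants of the Qt save-file dialog.
    return ("/tmp/figure.png", "PNG image (*.png)")

def _demo_getSaveFileName(parent, msg, start, filters, selectedFilter,
                          _get_save=_demo_native_dialog):
    # Same unwrapping as the wrapper above: keep only the filename.
    return _get_save(parent, msg, start, filters, selectedFilter)[0]

assert _demo_getSaveFileName(None, "Save figure", "", "*.png", "*.png") == "/tmp/figure.png"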
| mit |
awacha/cct | cct/processinggui/graphing/corrmat.py | 1 | 8132 | import logging
from typing import Union, Tuple
import matplotlib.cm
import numpy as np
from PyQt5 import QtWidgets
from matplotlib.axes import Axes
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT, FigureCanvasQTAgg
from matplotlib.figure import Figure
from .corrmat_ui import Ui_Form
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class CorrMatView(QtWidgets.QWidget, Ui_Form):
cmat: np.ndarray = None
fsns: np.ndarray = None
axes: Axes = None
figure: Figure = None
canvas: FigureCanvasQTAgg = None
toolbar: NavigationToolbar2QT = None
_figsize: Tuple[float, float] = None
def __init__(self, parent: QtWidgets.QWidget = None, project: "Project" = None,
figsize: Tuple[float, float] = (4, 4)):
super().__init__(parent)
self._figsize = figsize
self.project = project
self.setupUi(self)
def setupUi(self, Form):
super().setupUi(Form)
self.figure = Figure(figsize=self._figsize, constrained_layout=True)
self.canvas = FigureCanvasQTAgg(self.figure)
self.toolbar = NavigationToolbar2QT(self.canvas, self)
self.figureVerticalLayout.addWidget(self.toolbar)
self.figureVerticalLayout.addWidget(self.canvas)
self.axes = self.figure.add_subplot(1, 1, 1)
self.paletteComboBox.addItems(sorted(matplotlib.cm.cmap_d))
self.paletteComboBox.setCurrentIndex(self.paletteComboBox.findText(self.project.config.cmatpalette))
if self.paletteComboBox.currentIndex() < 0:
self.paletteComboBox.setCurrentIndex(0)
self.project.newResultsAvailable.connect(self.onUpdateResults)
self.paletteComboBox.currentIndexChanged.connect(self.onPaletteComboBoxChanged)
self.sampleNameComboBox.currentIndexChanged.connect(self.onSampleNameComboBoxChanged)
self.distanceComboBox.currentIndexChanged.connect(self.onDistanceComboBoxChanged)
def setSampleAndDistance(self, samplename: str, distance: Union[str, float]):
logger.debug('Setting sample name to {}, distance to {}'.format(samplename, distance))
if self.project is not None:
self.onUpdateResults()
logger.debug(
'After onUpdateResults, sample name is at {}, distance at {}'.format(self.sampleNameComboBox.currentText(),
self.distanceComboBox.currentText()))
self.sampleNameComboBox.blockSignals(True)
self.sampleNameComboBox.setCurrentIndex(self.sampleNameComboBox.findText(samplename))
self.sampleNameComboBox.blockSignals(False)
assert self.sampleNameComboBox.currentIndex() >= 0
targetdist = distance if isinstance(distance, str) else '{:.2f}'.format(distance)
self.distanceComboBox.blockSignals(True)
self.distanceComboBox.setCurrentIndex(self.distanceComboBox.findText(targetdist))
self.distanceComboBox.blockSignals(False)
assert self.distanceComboBox.currentIndex() >= 0
self.onDistanceComboBoxChanged() # ensure that the correlation matrix is reloaded
logger.debug('Sample name changed to {}, distance to {}'.format(self.sampleNameComboBox.currentText(),
self.distanceComboBox.currentText()))
logger.debug('Running replot')
self.replot()
def onUpdateResults(self):
logger.debug('onUpdateResults')
if self.project is None:
return
currentSample = self.sampleNameComboBox.currentText() if self.sampleNameComboBox.currentIndex() >= 0 else None
self.sampleNameComboBox.blockSignals(True)
try:
self.sampleNameComboBox.clear()
self.sampleNameComboBox.addItems(sorted(self.project.h5reader.samples()))
target = self.sampleNameComboBox.findText(currentSample) if currentSample is not None else -1
if target < 0:
target = 0
finally:
self.sampleNameComboBox.blockSignals(False)
logger.debug('Setting sample index to {}'.format(target))
self.sampleNameComboBox.setCurrentIndex(target)
self.onSampleNameComboBoxChanged()
def replot(self):
logger.debug('replot')
if self.cmat is None:
self.figure.clear()
self.canvas.draw_idle()
return
self.figure.clear()
self.axes = self.figure.add_subplot(1, 1, 1)
self.axes.clear()
self.axes.set_adjustable('box')
self.axes.set_aspect(1.0)
img = self.axes.imshow(self.cmat, interpolation='nearest',
cmap=self.paletteComboBox.currentText(),
extent=[-0.5, self.cmat.shape[1]-0.5, self.cmat.shape[0]-0.5, -0.5], origin='upper')
logger.debug('CMAT shape: {}'.format(self.cmat.shape))
self.axes.xaxis.set_ticks(np.arange(self.cmat.shape[1]))
self.axes.yaxis.set_ticks(np.arange(self.cmat.shape[0]))
self.axes.xaxis.set_ticklabels([str(f) for f in self.fsns], rotation=90)
self.axes.yaxis.set_ticklabels([str(f) for f in self.fsns])
self.figure.colorbar(img, ax=self.axes)
self.canvas.draw_idle()
if self.sampleNameComboBox.isEnabled() and self.distanceComboBox.isEnabled():
self.setWindowTitle('Correlation matrix: {} @{}'.format(self.sampleNameComboBox.currentText(),
self.distanceComboBox.currentText()))
def onPaletteComboBoxChanged(self):
self.replot()
def onSampleNameComboBoxChanged(self):
logger.debug('Sample name changed to {}'.format(self.sampleNameComboBox.currentText()))
if self.distanceComboBox.currentIndex() < 0:
currentdist = None
else:
currentdist = float(self.distanceComboBox.currentText())
distances = self.project.h5reader.distanceKeys(self.sampleNameComboBox.currentText())
self.distanceComboBox.blockSignals(True)
try:
self.distanceComboBox.clear()
self.distanceComboBox.addItems(sorted(distances, key=lambda x: float(x)))
targetdist = sorted(distances, key=lambda d: abs(float(d) - currentdist))[0] \
if currentdist is not None else distances[0]
finally:
self.distanceComboBox.blockSignals(False)
self.distanceComboBox.setCurrentIndex(self.distanceComboBox.findText(targetdist))
self.onDistanceComboBoxChanged()
def onDistanceComboBoxChanged(self):
logger.debug('Distance changed to {}'.format(self.distanceComboBox.currentText()))
try:
self.cmat = self.project.h5reader.getCorrMat(self.sampleNameComboBox.currentText(),
self.distanceComboBox.currentText())
self.fsns = np.array(sorted(list(
self.project.h5reader.getCurveParameter(self.sampleNameComboBox.currentText(),
self.distanceComboBox.currentText(), 'fsn').keys())))
logger.debug('Got cmat and fsns from sample {}, distance {}'.format(self.sampleNameComboBox.currentText(),
self.distanceComboBox.currentText()))
logger.debug('cmat shape: {}. FSNS length: {}'.format(self.cmat.shape, len(self.fsns)))
except OSError:
return
self.replot()
def savefig(self, filename: str, **kwargs):
self.canvas.draw()
self.figure.savefig(
filename,
# format=None # infer the format from the file name
transparent=True, # all patches will be transparent, instead of opaque white
optimize=True, # optimize JPEG file, ignore for other file types
progressive=True, # progressive JPEG, ignore for other file types
quality=95, # JPEG quality, ignore for other file types
**kwargs
)
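# Standalone sketch (not part of the widget above, with made-up data):
# reproduces the plotting convention used by replot(), a correlation matrix
# drawn with nearest-neighbour interpolation, cell-centred integer ticks
# labelled by FSN, and a colorbar, using the non-GUI Agg canvas so it runs
# without Qt.
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure

cmat = np.random.RandomState(0).uniform(0.5, 1.0, size=(6, 6))
fsns = np.arange(100, 106)  # hypothetical exposure (FSN) numbers
fig = Figure(figsize=(4, 4), constrained_layout=True)
FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
ax.set_adjustable('box')
ax.set_aspect(1.0)
img = ax.imshow(cmat, interpolation='nearest', cmap='viridis',
                extent=[-0.5, cmat.shape[1] - 0.5, cmat.shape[0] - 0.5, -0.5],
                origin='upper')
ax.set_xticks(np.arange(cmat.shape[1]))
ax.set_yticks(np.arange(cmat.shape[0]))
ax.set_xticklabels([str(f) for f in fsns], rotation=90)
ax.set_yticklabels([str(f) for f in fsns])
fig.colorbar(img, ax=ax)
fig.savefig('corrmat_example.png')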
| bsd-3-clause |
minesense/VisTrails | vistrails/packages/sklearn/tests.py | 2 | 12976 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import numpy as np
import unittest
from vistrails.tests.utils import execute, intercept_results
from vistrails.packages.sklearn.init import (Digits, Iris, TrainTestSplit,
Predict, Score, Transform,
CrossValScore, _modules,
GridSearchCV)
from vistrails.packages.sklearn import identifier
from sklearn.metrics import f1_score
def class_by_name(name):
"""Returns an autogenerated class from _modules from a string name."""
for module in _modules:
if module.__name__ == name:
return module
class TestSklearn(unittest.TestCase):
def test_digits(self):
# check that the digits dataset can be loaded
with intercept_results(Digits, 'data', Digits, 'target') as (data, target):
self.assertFalse(execute([
('datasets|Digits', identifier, [])
]))
data = np.vstack(data)
target = np.hstack(target)
self.assertEqual(data.shape, (1797, 64))
self.assertEqual(target.shape, (1797,))
def test_iris(self):
# check that the iris dataset can be loaded
with intercept_results(Iris, 'data', Iris, 'target') as (data, target):
self.assertFalse(execute([
('datasets|Iris', identifier, [])
]))
data = np.vstack(data)
target = np.hstack(target)
self.assertEqual(data.shape, (150, 4))
self.assertEqual(target.shape, (150,))
def test_train_test_split(self):
# check that we can split the iris dataset
with intercept_results(TrainTestSplit, 'training_data', TrainTestSplit,
'training_target', TrainTestSplit, 'test_data',
TrainTestSplit, 'test_target') as results:
X_train, y_train, X_test, y_test = results
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('cross-validation|TrainTestSplit', identifier,
[('test_size', [('Integer', '50')])])
],
[
(0, 'data', 1, 'data'),
(0, 'target', 1, 'target')
]
))
X_train = np.vstack(X_train)
X_test = np.vstack(X_test)
y_train = np.hstack(y_train)
y_test = np.hstack(y_test)
self.assertEqual(X_train.shape, (100, 4))
self.assertEqual(X_test.shape, (50, 4))
self.assertEqual(y_train.shape, (100,))
self.assertEqual(y_test.shape, (50,))
def test_classifier_training_predict(self):
with intercept_results(Predict, 'prediction', Predict,
'decision_function', TrainTestSplit, 'test_target',
Score, 'score') as results:
y_pred, decision_function, y_test, score = results
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('cross-validation|TrainTestSplit', identifier,
[('test_size', [('Integer', '50')])]),
('classifiers|LinearSVC', identifier, []),
('Predict', identifier, []),
('Score', identifier, []),
# use custom metric
('Score', identifier,
[('metric', [('String', 'f1')])]),
],
[
# train test split
(0, 'data', 1, 'data'),
(0, 'target', 1, 'target'),
# fit LinearSVC on training data
(1, 'training_data', 2, 'training_data'),
(1, 'training_target', 2, 'training_target'),
# predict on test data
(2, 'model', 3, 'model'),
(1, 'test_data', 3, 'data'),
# score test data
(2, 'model', 4, 'model'),
(1, 'test_data', 4, 'data'),
(1, 'test_target', 4, 'target'),
# f1 scorer
(2, 'model', 5, 'model'),
(1, 'test_data', 5, 'data'),
(1, 'test_target', 5, 'target')
]
))
y_pred = np.hstack(y_pred)
decision_function = np.vstack(decision_function)
y_test = np.hstack(y_test)
# unpack the results from the two scorers
score_acc, score_f1 = score
self.assertEqual(y_pred.shape, (50,))
self.assertTrue(np.all(np.unique(y_pred) == np.array([0, 1, 2])))
self.assertEqual(decision_function.shape, (50, 3))
# some accuracy
self.assertTrue(np.mean(y_test == y_pred) > .8)
# score is actually the accuracy
self.assertEqual(np.mean(y_test == y_pred), score_acc)
# f1 score is actually f1 score
self.assertEqual(f1_score(y_test, y_pred), score_f1)
def test_transformer_supervised_transform(self):
# test feature selection
with intercept_results(Transform, 'transformed_data') as (transformed_data,):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('feature_selection|SelectKBest', identifier,
[('k', [('Integer', '2')])]),
('Transform', identifier, [])
],
[
(0, 'data', 1, 'training_data'),
(0, 'target', 1, 'training_target'),
(1, 'model', 2, 'model'),
(0, 'data', 2, 'data')
]
))
transformed_data = np.vstack(transformed_data)
self.assertEqual(transformed_data.shape, (150, 2))
def test_transformer_unsupervised_transform(self):
# test PCA
with intercept_results(Transform, 'transformed_data') as (transformed_data,):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('decomposition|PCA', identifier,
[('n_components', [('Integer', '2')])]),
('Transform', identifier, [])
],
[
(0, 'data', 1, 'training_data'),
(1, 'model', 2, 'model'),
(0, 'data', 2, 'data')
]
))
transformed_data = np.vstack(transformed_data)
self.assertEqual(transformed_data.shape, (150, 2))
def test_manifold_learning(self):
# test Isomap
with intercept_results(class_by_name("Isomap"), 'transformed_data') as (transformed_data,):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('manifold|Isomap', identifier, []),
],
[
(0, 'data', 1, 'training_data'),
]
))
transformed_data = np.vstack(transformed_data)
self.assertEqual(transformed_data.shape, (150, 2))
def test_cross_val_score(self):
        # check that cross_val_score of LinearSVC has the right length
with intercept_results(CrossValScore, 'scores') as (scores,):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('classifiers|LinearSVC', identifier, []),
('cross-validation|CrossValScore', identifier, []),
],
[
(0, 'data', 2, 'data'),
(0, 'target', 2, 'target'),
(1, 'model', 2, 'model')
]
))
scores = np.hstack(scores)
self.assertEqual(scores.shape, (3,))
self.assertTrue(np.mean(scores) > .8)
def test_gridsearchcv(self):
# check that gridsearch on DecisionTreeClassifier does the right number of runs
# and gives the correct result.
with intercept_results(GridSearchCV, 'scores', GridSearchCV,
'best_parameters') as (scores, parameters):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('classifiers|DecisionTreeClassifier', identifier, []),
('GridSearchCV', identifier,
[('parameters', [('Dictionary', "{'max_depth': [1, 2, 3, 4]}")])]),
],
[
(0, 'data', 2, 'data'),
(0, 'target', 2, 'target'),
(1, 'model', 2, 'model')
]
))
self.assertEqual(len(scores[0]), 4)
self.assertTrue(parameters[0]['max_depth'], 2)
def test_pipeline(self):
with intercept_results(Iris, 'target', Predict, 'prediction') as (y_true, y_pred):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('preprocessing|StandardScaler', identifier, []),
('feature_selection|SelectKBest', identifier,
[('k', [('Integer', '2')])]),
('classifiers|LinearSVC', identifier, []),
('Pipeline', identifier, []),
('Predict', identifier, [])
],
[
# feed data to pipeline
(0, 'data', 4, 'training_data'),
(0, 'target', 4, 'training_target'),
# put models in pipeline
(1, 'model', 4, 'model1'),
(2, 'model', 4, 'model2'),
(3, 'model', 4, 'model3'),
# predict using pipeline
(4, 'model', 5, 'model'),
(0, 'data', 5, 'data')
]
))
y_true, y_pred = np.array(y_true[0]), np.array(y_pred[0])
self.assertEqual(y_true.shape, y_pred.shape)
self.assertTrue(np.mean(y_true == y_pred) > .8)
def test_nested_cross_validation(self):
with intercept_results(CrossValScore, 'scores') as (scores, ):
self.assertFalse(execute(
[
('datasets|Iris', identifier, []),
('classifiers|DecisionTreeClassifier', identifier, []),
('GridSearchCV', identifier,
[('parameters', [('Dictionary', "{'max_depth': [1, 2, 3, 4]}")])]),
('cross-validation|CrossValScore', identifier, [])
],
[
(0, 'data', 3, 'data'),
(0, 'target', 3, 'target'),
(1, 'model', 2, 'model'),
(2, 'model', 3, 'model')
]
))
self.assertEqual(len(scores[0]), 3)
self.assertTrue(np.mean(scores[0]) > .8)
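# For reference, a plain scikit-learn sketch of the workflow exercised by
# test_classifier_training_predict above, without the VisTrails module layer
# (assumes a reasonably recent scikit-learn; not part of the test suite).
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.svm import LinearSVC

    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=50, random_state=0)
    model = LinearSVC().fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print("accuracy: %.3f" % np.mean(y_pred == y_test))
    print("macro f1: %.3f" % f1_score(y_test, y_pred, average='macro'))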
| bsd-3-clause |
richardotis/scipy | scipy/special/add_newdocs.py | 11 | 70503 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
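# Quick illustrative self-check of the registry defined above (hypothetical
# entry, removed again immediately so the real docstring table is unchanged):
add_newdoc("scipy.special", "_example_entry", "Placeholder docstring, not a real ufunc.")
assert get("scipy.special._example_entry") == "Placeholder docstring, not a real ufunc."
del docdict["scipy.special._example_entry"]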
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
    btdtrib(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
    Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
    chdtriv(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellik(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when x is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
    known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative density function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
        hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
    Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
    k1(x)
    Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
    Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
    Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
    Returns the modified Struve function Lv(x) of order v at x; x must
    be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
    ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
    ncfdtridfd : Calculate dfd, given CDF and iCDF values.
    ncfdtridfn : Calculate dfn, given CDF and iCDF values.
    ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
    >>> import numpy as np
    >>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
    >>> import numpy as np
    >>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
    nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
    nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
    The Pochhammer symbol (rising factorial) is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
    Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
    Reciprocal of the gamma function
    Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
    Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
    Computes the Struve function Hv(x) of order v at x; x must be
    positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
    Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
| bsd-3-clause |
prheenan/Research | Personal/EventDetection/Util/Learning.py | 1 | 23562 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys,traceback
from Research.Personal.EventDetection.Util import Analysis,InputOutput,Scoring
from GeneralUtil.python import CheckpointUtilities,GenUtilities,PlotUtilities
from sklearn.model_selection import StratifiedKFold
import multiprocessing
class fold_meta:
def __init__(self,meta):
self.velocity = meta.Velocity
self.name = meta.Name
self.source_file = meta.SourceFile
class fold:
def __init__(self,param,scores,info):
"""
stores a single 'fold' (ie: fixed parameter value, scores and meta
for each FEC)
Args:
param: parameter used
            scores: list of score objects, one per FEC in the fold
info: meta information about the FEC objects
"""
self.param = param
self.scores = scores
self.info = info
class learning_curve:
def __init__(self,description,func_to_call,list_of_params):
"""
stores the folds and parameters associated with a function and list of
learning parameters
Args:
description: short name
func_to_call: what function to call for this data
            list_of_params: list of dictionaries (each of which is
            passed to func_to_call for the appropriate folds)
"""
self.description = description
self.list_of_folds = None
self.validation_folds = None
self.list_of_params = list_of_params
self.func_to_call = func_to_call
def param_values(self):
        # XXX assume 1-D search, only one parameter per list
return np.array([l.values()[0] for l in self.list_of_params])
def set_list_of_folds(self,folds):
self.list_of_folds = folds
def set_validation_folds(self,folds):
self.validation_folds = folds
def _scores_by_params(self,train=True,score_tx_func=lambda x: x):
"""
        Returns a nested list; first level is parameter, second level is
folds, third level is all scores in the fold
Args:
            train: whether to get the training or validation folds
score_tx_func: takes in a scoring object, should also return one
useful for (e.g.) only looking at subsets of data
Returns:
            nested list as described
"""
fold_list = self.list_of_folds if train else self.validation_folds
scores_by_params = [ [[score_tx_func(s) for s in fold.scores]
for fold in folds_by_param]
for folds_by_param in fold_list]
return scores_by_params
def _walk_scores(scores,
func_score=lambda x : x,
func_fold =lambda x : x,
func_param=lambda x : x,
func_top = lambda x:x):
"""
function for 'easily' walking through list of scores
Args:
func_<x>: applied at the level of x (e.g. func_score is for a single
score, func_fold is a list of scores meaning a fold, etc)
Returns:
result of the chained functions
"""
return func_top([ func_param([ func_fold([func_score(s) for s in scores])
for scores in by_param])
for by_param in scores])
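# Editorial sketch (hypothetical data, not part of the original module):
# _walk_scores on a nested [parameter][fold][score] list, taking the mean
# within each fold and the median across folds for every parameter value.
def _example_walk_scores():
    nested = [[[1, 2, 3], [4, 5]],      # parameter 0: two folds of scores
              [[6], [7, 8]]]            # parameter 1: two folds of scores
    per_param = _walk_scores(nested,
                             func_fold=np.mean,      # collapse each fold
                             func_param=np.median,   # collapse across folds
                             func_top=np.array)
    assert np.allclose(per_param, [3.25, 6.75])
    return per_param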
def safe_scores(scores,value_func=lambda x: x,eval_func=lambda x:x):
"""
function for getting possibly None values and evaluating them
Args:
scores: list of scores
value:func: takes a score, gives a value
eval_func: for evaluating the non-None values
Returns:
result of the chained functions
"""
raw = [value_func(s) for s in scores]
safe = [r for r in raw if r is not None]
if (len(safe) > 0):
return eval_func(safe)
else:
return None
def safe_median(scores):
"""
function for safely evaluating the median
Args:
scores: see safe_scores
Returns:
result of the chained functions
"""
return safe_scores(scores,eval_func=np.median)
def median_dist_per_param(scores,**kwargs):
"""
function for safely getting the median of the scores we want
Args:
scores: see safe_scores
**kwargs: passed to minimum_distance_median
Returns:
        median of the minimum distance to an event, per parameter across folds
        (1-D array)
"""
score_func = lambda x: x.minimum_distance_median(**kwargs)
func_fold = lambda x: safe_scores(x,value_func=score_func,
eval_func=np.median)
return _walk_scores(scores,func_fold =func_fold,
func_param=safe_median,func_top=np.array)
def stdev_dist_per_param(scores,**kwargs):
"""
function for safely getting the median of the scores we want
Args:
scores: see safe_scores
kwargs: see median_dist_per_param
Returns:
        stdev of the minimum distance to an event, per parameter across folds
        (1-D array)
"""
score_func = lambda x: x.minimum_distance_distribution(**kwargs)
eval_func = lambda x: np.std(np.concatenate(x))
func_fold = lambda x: safe_scores(x,value_func=score_func,
eval_func=eval_func)
return _walk_scores(scores,func_fold =func_fold,
func_param=safe_median,func_top=np.array)
def rupture_objects(scores,get_true,slice_v=slice(0,None,1)):
"""
get the rupture objects associated with the scores
Args:
scores: see event_distance_distribution
        get_true: if true, gets the *true* rupture objects associated with
        the scores; otherwise gets the predicted rupture objects
Returns:
array of rupture objects, one per parameter
"""
if (get_true):
func_tmp = lambda x: [v.ruptures_true for v in x]
else:
func_tmp = lambda x: [v.ruptures_predicted for v in x]
# need to concatenate everything
func_fold = lambda *args,**kwargs: np.concatenate(func_tmp(*args,**kwargs))
return _walk_scores(scores,func_fold=func_fold,
func_param=np.concatenate,func_top=np.array)
def limits_and_bins_force_and_load(ruptures_true,ruptures_pred,
loading_true,loading_pred,n=10,limit=False):
"""
Return a 4-tuple of limit,bins for rupture force and loading rate
Args:
        <x>_<true/pred> : list of true/predicted x
        limit: if true, limit each axis to the 1st-99th percentile of the data
        n: number of bins
    Returns:
        limits force, bins force, limits loading, bins loading
"""
double_f = lambda f1,f2,*args: f2(f1([data
for f_type in args
for data in f_type]))
# determine the limits on the rupture force
if limit:
get_limited_data = lambda x: np.array(x)[((x >= np.percentile(x,1)) &
(x <= np.percentile(x,99)))]
else:
get_limited_data = lambda x: x
min_y = double_f(get_limited_data,np.min,ruptures_pred,ruptures_true)
max_y = double_f(get_limited_data,np.max,ruptures_pred,ruptures_true)
lim_force = [min_y,max_y]
# determine the limits on the loading rate
safe = lambda x: [x[i] for i in np.where(np.array(x)>0)[0]]
min_x = double_f(get_limited_data,np.min,safe(loading_pred),
safe(loading_true))
max_x = double_f(get_limited_data,np.max,safe(loading_pred),
safe(loading_true))
lim_load = [min_x,max_x]
bins_rupture= np.linspace(*lim_force,num=n)
min_y = max(min(lim_load),1e-2)
logy = np.log10([min_y,max(lim_load)])
bins_load = np.logspace(*logy,num=n)
return lim_force,bins_rupture,lim_load,bins_load
def get_rupture_in_pN_and_loading_in_pN_per_s(objs):
"""
Args:
objs: see _plot_rupture_objecs
Returns:
tuple of <rupture force in pN, loading rate in pN>
"""
to_pN = lambda x: x * 1e12
rupture_forces_pN = np.array([to_pN(obj.rupture_force) for obj in objs])
loading_rate_pN_per_s = np.array([to_pN(obj.loading_rate) for obj in objs])
return rupture_forces_pN,loading_rate_pN_per_s
def get_true_and_predicted_ruptures_per_param(learner,**kw):
"""
    gets the true and predicted rupture objects for the *validation* folds
of each learner object
Args:
        learner: the learning_curve object to use
**kw: passed to _scores_by_params
Returns:
tuple of validation true, predicted ruptures
"""
train_scores = learner._scores_by_params(train=True,**kw)
valid_scores = learner._scores_by_params(train=False,**kw)
# get the validation ruptures (both true and predicted)
ruptures_valid_true = rupture_objects(valid_scores,get_true=True)
ruptures_valid_pred = rupture_objects(valid_scores,get_true=False)
return ruptures_valid_true,ruptures_valid_pred
def concatenate_all(x):
return np.concatenate([list(np.array(v).flatten())
for v in x if len(v) > 0])
def lambda_distribution(scores,f_lambda):
"""
    applies f_lambda to every score object and concatenates the results at
    each parameter value
    Args:
        scores: learner._scores_by_params object
        f_lambda: function taking a single score object and returning a value
    Returns:
        concatenated f_lambda values at each parameter value
"""
func_fold = lambda x: [f_lambda(v) for v in x]
func_param = concatenate_all
return _walk_scores(scores,func_fold = func_fold,
func_param=func_param,func_top=np.array)
def event_distance_distribution(scores,distance_is_absolute=False,**kwargs):
"""
    gets the distribution of distances at each parameter value
Args:
scores: learner._scores_by_params object
**kwargs: passed to minimum_distance_distribution
Returns:
concatenates distributions at each parameter value
"""
kw = dict(distance_is_absolute=distance_is_absolute,**kwargs)
func_fold = lambda x: \
np.concatenate([v.minimum_distance_distribution(**kw) for v in x])
return _walk_scores(scores,func_fold = func_fold,
func_param=np.concatenate,func_top=np.array)
def f_score_dist(v):
"""
returns the distance f score for the given score object v
Args:
        v: the score object to use
    Returns:
f score, 0 to 1, higher is better.
"""
kw = dict(floor_is_max=True)
dist_to_true = v.minimum_distance_distribution(to_true=True,**kw)
dist_to_pred = v.minimum_distance_distribution(to_true=False,**kw)
max_x = v.max_x
# get the averages ? XXX
if (len(dist_to_true) != 0):
average_to_true = np.median(dist_to_true)
else:
average_to_true = 0
if (len(dist_to_pred) != 0):
average_to_pred = np.median(dist_to_pred)
else:
average_to_pred = 0
# defining precision and recall in a distance-analogy sense
precision_dist = average_to_true
recall_dist = average_to_pred
f_score = \
1-(2/max_x) * (precision_dist * recall_dist)/(precision_dist+recall_dist)
return f_score
def event_distance_f_score(scores):
"""
returns the *distance* f score for each curve
Args:
scores: see fold_number_events_off
"""
func_fold = lambda x: [f_score_dist(v) for v in x]
return _walk_scores(scores,func_fold = func_fold,
func_param=np.concatenate,func_top=np.array)
def fold_number_events_off(scores):
"""
    computes the relative number of missed events within a single fold
    (see number_events_off_per_param)
    Args:
        scores: list of score objects in one fold, formatted like one element
        of learning_curve._scores_by_params
    Returns:
        sum of |true - predicted| event counts divided by sum of true events
"""
true_pred = [x.n_true_and_predicted_events() for x in scores]
true_list = [t[0] for t in true_pred]
pred_list = [t[1] for t in true_pred]
missed_list = [abs(true-pred) for true,pred in zip(true_list,pred_list)]
relative_missed = np.sum(missed_list)/np.sum(true_list)
to_ret = relative_missed
return to_ret
def number_events_off_per_param(params,scores):
"""
gets the (relative) number of events we were off by:
    (1) getting the predicted and true number of events in each fold
    (2) getting rel = sum(|true - predicted|) / sum(true) within the fold
    (3) taking the median and stdev of rel across all folds
Args:
params: the x value to use
scores: the scorer object to use, formatted like
learning_curve._scores_by_params
    Returns:
        tuple of <valid params, valid scores, valid errors>
"""
cat_median = lambda x: safe_median(np.concatenate(x))
cat_std = lambda x: np.std(np.concatenate(x))
score_func = lambda s : _walk_scores(s,func_fold=fold_number_events_off,
func_param=safe_median,
func_top=np.array)
error_func = lambda s: _walk_scores(s,func_fold=fold_number_events_off,
func_param=np.std,
func_top=np.array)
kw = dict(score_func=score_func,error_func=error_func)
return valid_scores_erors_and_params(params,scores,**kw)
def median_dist_metric(x_values,scores,**kwargs):
"""
function for safely getting the median metric
Args:
x_values: the parameters
scores: see safe_scores
**kwargs: passed to minimum_distance_median...
Returns:
see valid_scores_erors_and_params
"""
score_func_pred = lambda x: median_dist_per_param(x,**kwargs)
error_func_pred = lambda x: stdev_dist_per_param(x,**kwargs)
kw_pred = dict(score_func=score_func_pred,error_func=error_func_pred)
x,dist,dist_std = valid_scores_erors_and_params(x_values,scores,**kw_pred)
return x,dist,dist_std
def valid_scores_erors_and_params(params,scores,score_func,error_func):
"""
given a function for getting scores and errors, finds where the results
    are valid, selecting the x, y, and error there
Args:
params: what the x values are
scores: the scores we will search using score/error_func
<score/error>_func: functions giving the scores and errors of scores
        at each value of params; entries are None where undefined
Returns:
tuple of <valid x, valid score, valid score error>
"""
dist = score_func(scores)
dist_std = error_func(scores)
valid_func = lambda x: (~np.equal(x,None))
good_idx_func = lambda train,valid : np.where( valid_func(train) & \
valid_func(valid))
good_idx = good_idx_func(dist,dist_std)
return params[good_idx],dist[good_idx],dist_std[good_idx]
def single_example_info_and_score(func,example,**kwargs):
"""
    scores a single force-extension curve and gathers its meta information
Args:
func: a method taking in example and **kwargs and returning event
indices
example: TimeSepForce object
**kwargs: for func
Returns:
        tuple of <scoring object, fold_meta object>
"""
meta = example.Meta
# get the predicted event index
event_idx = func(example,**kwargs)
example_split = Analysis.zero_and_split_force_extension_curve(example)
# get the score
score = Scoring.get_scoring_info(example_split,event_idx)
return score,fold_meta(meta)
def run_functor(functor):
"""
Given a no-argument functor, run it and return its result. We can
use this with multiprocessing.map and map it over a list of job
functors to do them.
    Works around multiprocessing's minimal exception reporting by capturing
    the full traceback text
Args:
functor: a no-argument function (probably a lambda)
Returns:
whatever functor does, possibly raising an exception
"""
try:
# This is where you do your actual work
return functor()
except:
# Put all exception text into an exception and raise that
err_string = "".join(traceback.format_exception(*sys.exc_info()))
raise Exception(err_string)
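# Editorial sketch (hypothetical job, not part of the original module):
# wrapping work in a no-argument functor lets a multiprocessing pool surface
# the full traceback text through run_functor if the job fails.
def _example_run_functor():
    job = lambda: sum(range(10))
    return run_functor(job)             # -> 45; exceptions keep their traceback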
def single_example_multiproc(args):
"""
    multiprocessing interface to single_example_info_and_score
Args:
tuple of arguments to single_example_info_and_score
Returns:
see single_example_info_and_score
"""
func,example,dict_kwargs = args
return single_example_info_and_score(func,example,**dict_kwargs)
class multiprocessing_functor(object):
def __init__(self):
pass
def __call__(self,*args):
return single_example_multiproc(*args)
def single_fold_score(fold_data,func,kwargs,pool):
"""
Gets the fold object for a single set of data (ie: a single fold)
for a fixed parameter set
Args:
fold_data: set of TimeSepForce objects to use
func: to call, should return a list of predicted event starts
kwargs: dict, fixed parameters to pass to func
        pool: if not None, a multiprocessing pool for parallel processing
Returns:
fold object
"""
scores = []
info = []
if (pool is None):
scores_info = [single_example_info_and_score(func,ex,**kwargs)
for ex in fold_data]
else:
# we make a list of functor objects (functions + arguments)
# that we can then safely run
functors_args = [ (func,ex,kwargs) for ex in fold_data]
scores_info = pool.map(multiprocessing_functor(),functors_args)
    # POST: got the scores and info, either serially or via the pool
scores = [s[0] for s in scores_info]
info = [s[1] for s in scores_info]
return fold(kwargs,scores,info)
def folds_for_one_param(data,param,fold_idx,func_to_call,pool):
folds = []
folds_valid = []
for train_idx,test_idx in fold_idx:
# get the training scores
fold_data = [data[f] for f in train_idx]
folds_tmp = single_fold_score(fold_data,func_to_call,param,
pool=pool)
folds.append(folds_tmp)
# get the validation scores
valid_data = [data[f] for f in test_idx]
valid_fold = single_fold_score(valid_data,func_to_call,param,
pool=pool)
folds_valid.append(valid_fold)
return folds,folds_valid
def get_all_folds_for_one_learner(cache_directory,force,learner,data,fold_idx,
pool):
"""
Gets all the folds for a single learner
Args:
cache_directory: base where we cache things
force: if true, force re-reading of all folds
learner: learning_curve to use
data: list of TimeSepForce objects to use
fold_idx: list of <train,test>; one tuple per fold
pool: a multiprocessing pool to draw from (if not None)
Returns:
tuple of :
(0) list, one element per paramter. each element is a list of folds
(1) list, one element per parameter. each element is a list of
validation folds
"""
func_to_call = learner.func_to_call
params_then_folds,param_validation_fold = [],[]
for i,param in enumerate(learner.list_of_params):
param_val = param.values()[0]
cache_name = "{:s}_{:s}_param_{:d}_{:.3g}.pkl".\
format(cache_directory,learner.description,i,param_val)
ret = CheckpointUtilities.getCheckpoint(cache_name,folds_for_one_param,
force,data,param,fold_idx,
func_to_call,pool=pool)
folds,folds_valid = ret
# done with all the folds for this parameter; save them out
params_then_folds.append(folds)
param_validation_fold.append(folds_valid)
return params_then_folds,param_validation_fold
def _get_single_curve(name,tuple_v,func):
"""
Returns a single learning curve object
Args:
        name: the name of the curves
tuple_v: tuple like <function to call, list of parameters>
func: takes in list of single parameters, returns a list of kwargs
dicts for func
Returns:
learning_curve object
"""
return learning_curve(name,tuple_v[0],func(tuple_v[1]))
def get_single_learner_folds(cache_directory,force,l,data,fold_idx,pool_size):
"""
return the training and testing folds for a given learner
Args:
cache_directory: where to cache individual parameter folds
force: if caching should be forced
l : learner to use
        data : data to use
        fold_idx: which indices to use for the folds
        pool_size: number of processes to use. If <=1, just uses 1
(no parallelism)
Returns:
tuple of <training,validation folds>
"""
if (pool_size <= 1):
# dont use a multiprocessing pool
pool = None
else:
pool = multiprocessing.Pool(pool_size)
print("Using {:d} processes for {:s}".format(pool_size,l.description))
ret = get_all_folds_for_one_learner(cache_directory,force,
l,data,fold_idx,pool=pool)
list_of_folds,validation_folds = ret
return list_of_folds,validation_folds
def get_cached_folds(categories,force_read,force_learn,
cache_directory,limit,n_folds,seed=42,
learners=None,pool_size=1):
"""
caches all the results for every learner after reading in all the data
Args:
categories: list of velocity-separated data
        force_read: if the csv files should be re-read
        force_learn: if the learner objects should be re-read
        cache_directory: where to put the pkl files
        limit: how many examples to read in
        n_folds: how many folds to use
seed: for PRNG
Returns:
        list of learner objects, each with its training and validation folds set
"""
# read and update all the categories
categories = InputOutput.\
read_categories(categories,force_read,cache_directory,limit)
labels_data = [ [i,d] for i,cat in enumerate(categories) for d in cat.data]
labels = [l[0] for l in labels_data]
data = [l[1] for l in labels_data]
# determine the folds to use
fold_obj = StratifiedKFold(n_splits=n_folds,shuffle=True,random_state=seed)
    # .split returns a generator by default; convert to a list so the fold
    # indices are not exhausted after the first use
fold_idx = list(fold_obj.split(X=np.zeros(len(labels)),y=labels))
if (learners is None):
learners = get_learners()
# POST: all data read in. get all the scores for all the learners.
for l in learners:
cache_file = cache_directory + "folds_" + l.description + ".pkl"
tmp = CheckpointUtilities.getCheckpoint(cache_file,
get_single_learner_folds,
force_learn,
cache_directory,force_learn,
l,data=data,fold_idx=fold_idx,
pool_size=pool_size)
list_of_folds,validation_folds = tmp
l.set_list_of_folds(list_of_folds)
l.set_validation_folds(validation_folds)
return learners
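# Editorial sketch (hypothetical labels, not part of the original module):
# the stratified fold indices used above; converting the generator to a list
# lets the same folds be reused for every parameter value.
def _example_stratified_folds(n_folds=3, seed=42):
    labels = [0, 0, 0, 1, 1, 1]
    fold_obj = StratifiedKFold(n_splits=n_folds, shuffle=True,
                               random_state=seed)
    fold_idx = list(fold_obj.split(X=np.zeros(len(labels)), y=labels))
    return fold_idx                     # list of (train_idx, test_idx) tuples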
| gpl-3.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated with the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
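# Editorial sketch (not part of scikit-learn): l1_cross_distances on a tiny
# design matrix; D holds the componentwise |x_i - x_j| for every pair i < j
# and ij records the corresponding pair indices.
def _example_l1_cross_distances():
    X = np.array([[0., 0.],
                  [1., 2.],
                  [3., 1.]])
    D, ij = l1_cross_distances(X)
    assert np.allclose(D, [[1., 2.], [3., 1.], [2., 1.]])
    assert ij.tolist() == [[0, 1], [0, 2], [1, 2]]
    return D, ij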
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
        y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(int(np.ceil(float(n_eval) / batch_size))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(int(np.ceil(float(n_eval) / batch_size))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
        try:
            Q, G = linalg.qr(Ft, econ=True)
        except TypeError:
            # scipy removed the 'econ' keyword after 0.7; the economy-size
            # decomposition is now requested via mode='economic'.
            Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
        # det(R) equals the squared product of the diagonal elements of its
        # Cholesky factor C; its n_samples-th root is used here to keep the
        # reduced likelihood value well scaled.
        detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
                    if (20 * k) // self.random_start > percent_completed:
                        percent_completed = (20 * k) // self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| gpl-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/userdemo/anchored_box03.py | 1 | 1230 | """
==============
Anchored Box03
==============
"""
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredAuxTransformBox
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(3, 3))
box = AnchoredAuxTransformBox(ax.transData, loc=2)
el = Ellipse((0, 0), width=0.1, height=0.4, angle=30) # in data coordinates!
box.drawing_area.add_artist(el)
ax.add_artist(box)
pltshow(plt)
| mit |
Mega-DatA-Lab/mxnet | example/svm_mnist/svm_mnist.py | 44 | 4094 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the ultimate layer based on L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
# Now we fetch the MNIST dataset, add some noise, as the article suggests,
# permute it and assign the examples to be used on our network
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# This is just to normalize the input and separate train set and test set
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size, label_name='svm_label')
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size, label_name='svm_label')
# Here we instantiate and fit the model for our data.
# The article actually suggests using 400 epochs,
# but I reduced it to 10 for convenience.
mod = mx.mod.Module(
context = mx.cpu(0), # Run on CPU 0
symbol = mlp, # Use the network we just defined
label_names = ['svm_label'],
)
mod.fit(
train_data=train_iter,
eval_data=test_iter, # Testing data set. MXNet computes scores on test set every epoch
batch_end_callback = mx.callback.Speedometer(batch_size, 200), # Logging module to print out progress
num_epoch = 10, # Train for 10 epochs
optimizer_params = {
'learning_rate': 0.1, # Learning rate
'momentum': 0.9, # Momentum for SGD with momentum
'wd': 0.00001, # Weight decay for regularization
},
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now it prints how good did the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
helldorado/ansible | hacking/cgroup_perf_recap_graph.py | 54 | 4384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import argparse
import csv
from collections import namedtuple
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
raise SystemExit('matplotlib is required for this script to work')
Data = namedtuple('Data', ['axis_name', 'dates', 'names', 'values'])
def task_start_ticks(dates, names):
item = None
ret = []
for i, name in enumerate(names):
if name == item:
continue
item = name
ret.append((dates[i], name))
return ret
def create_axis_data(filename, relative=False):
x_base = None if relative else 0
axis_name, dummy = os.path.splitext(os.path.basename(filename))
dates = []
names = []
values = []
with open(filename) as f:
reader = csv.reader(f)
for row in reader:
if x_base is None:
x_base = float(row[0])
dates.append(mdates.epoch2num(float(row[0]) - x_base))
names.append(row[1])
values.append(float(row[3]))
return Data(axis_name, dates, names, values)
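# Note on the expected input (inferred from the positional indexing above):
# each cgroup_perf_recap CSV row is consumed as
#     row[0] -> epoch timestamp (seconds), row[1] -> task name,
#     row[3] -> sampled value; row[2] is ignored by this script.
# A hypothetical line such as ``1545060022.81,Gathering Facts,...,250.3``
# therefore contributes one (date, name, value) triple to the Data tuple.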
def create_graph(data1, data2, width=11.0, height=8.0, filename='out.png', title=None):
fig, ax1 = plt.subplots(figsize=(width, height), dpi=300)
task_ticks = task_start_ticks(data1.dates, data1.names)
ax1.grid(linestyle='dashed', color='lightgray')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%X'))
ax1.plot(data1.dates, data1.values, 'b-')
if title:
ax1.set_title(title)
ax1.set_xlabel('Time')
ax1.set_ylabel(data1.axis_name, color='b')
for item in ax1.get_xticklabels():
item.set_rotation(60)
ax2 = ax1.twiny()
ax2.set_xticks([x[0] for x in task_ticks])
ax2.set_xticklabels([x[1] for x in task_ticks])
ax2.grid(axis='x', linestyle='dashed', color='lightgray')
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_label_position('bottom')
ax2.spines['bottom'].set_position(('outward', 86))
ax2.set_xlabel('Task')
ax2.set_xlim(ax1.get_xlim())
for item in ax2.get_xticklabels():
item.set_rotation(60)
ax3 = ax1.twinx()
ax3.plot(data2.dates, data2.values, 'g-')
ax3.set_ylabel(data2.axis_name, color='g')
fig.tight_layout()
fig.savefig(filename, format='png')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs=2, help='2 CSV files produced by cgroup_perf_recap to graph together')
parser.add_argument('--relative', default=False, action='store_true',
help='Use relative dates instead of absolute')
    parser.add_argument('--output', default='out.png', help='output path of PNG file: Default %(default)s')
parser.add_argument('--width', type=float, default=11.0,
help='Width of output image in inches. Default %(default)s')
parser.add_argument('--height', type=float, default=8.0,
help='Height of output image in inches. Default %(default)s')
parser.add_argument('--title', help='Title for graph')
return parser.parse_args()
def main():
args = parse_args()
data1 = create_axis_data(args.files[0], relative=args.relative)
data2 = create_axis_data(args.files[1], relative=args.relative)
create_graph(data1, data2, width=args.width, height=args.height, filename=args.output, title=args.title)
print('Graph written to %s' % os.path.abspath(args.output))
if __name__ == '__main__':
main()
| gpl-3.0 |
ClimbsRocks/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make an adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
dopplershift/Scattering | scripts/scatter_models_units.py | 1 | 2244 | import matplotlib.pyplot as plt
import numpy as np
import scattering, dsd
import scipy.constants as consts
import quantities as pq
def to_dBz(lin):
return 10.0 * np.log10((lin / pq.CompoundUnit("mm^6/m^3")).simplified)
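# Worked sanity check for the helper above: a linear reflectivity factor of
# 1 mm^6/m^3 corresponds to 0 dBZ, i.e. (illustrative only; the exact repr
# depends on the installed ``quantities`` version)
#
#     >>> to_dBz(1.0 * pq.CompoundUnit("mm^6/m^3"))   # ~ 0.0 (dimensionless)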
d = np.linspace(0.01, 1.0, 200).reshape(200,1) * pq.cm
l = np.linspace(0.01, 25.0, 100).reshape(1,100) * pq.g / pq.m**3
dist = dsd.mp_from_lwc(d, l)
#lam = 0.1
lam = 0.0321 * pq.m
temp = 10.0
db_factor = 10.0 * np.log10(np.e)
mie = scattering.scatterer(lam, temp, 'water', diameters=d)
mie.set_scattering_model('mie')
ray = scattering.scatterer(lam, temp, 'water', diameters=d)
ray.set_scattering_model('rayleigh')
oblate_rg = scattering.scatterer(lam, temp, 'water', diameters=d,
shape='oblate')
oblate_rg.set_scattering_model('gans')
sphere_rg = scattering.scatterer(lam, temp, 'water', diameters=d,
shape='sphere')
sphere_rg.set_scattering_model('gans')
oblate = scattering.scatterer(lam, temp, 'water', diameters=d,
shape='oblate')
oblate.set_scattering_model('tmatrix')
d = d.squeeze()
l = l.squeeze()
lines = ['r-', 'g-', 'b-', 'k-', 'k--']
names = ['Rayleigh', 'Rayleigh-Gans (oblate)', 'Rayleigh-Gans (sphere)',
'Mie', 'T-Matrix (oblate)']
models = [ray, oblate_rg, sphere_rg, mie, oblate]
for model, line, name in zip(models, lines, names):
ref = to_dBz(model.get_reflectivity_factor(dist))
atten = model.get_attenuation(dist).rescale(1/pq.km) * db_factor
plt.subplot(2, 2, 1)
plt.semilogy(d, model.sigma_b.rescale('m^2'), line, label=name)
plt.subplot(2, 2, 2)
plt.plot(l, ref, line, label=name)
plt.subplot(2, 2, 3)
plt.semilogy(d, model.sigma_e.rescale('m^2'), line, label=name)
plt.subplot(2, 2, 4)
plt.plot(l, atten, line, label=name)
plt.subplot(2,2,1)
plt.legend(loc = 'lower right')
plt.xlabel('Diameter (cm)')
plt.ylabel(r'$\sigma_b \rm{(m^2)}$')
plt.subplot(2,2,2)
plt.xlabel('Rain Content (g/m^3)')
plt.ylabel(r'Z$_{e}$ (dBz)')
plt.subplot(2,2,3)
plt.xlabel('Diameter (cm)')
plt.ylabel(r'$\sigma_e \rm{(m^2)}$')
plt.subplot(2,2,4)
plt.xlabel('Rain Content (g/m^3)')
plt.ylabel('1-way Attenuation (db/km)')
plt.gcf().text(0.5,0.95,'Comparison of Various Scattering models',
horizontalalignment='center',fontsize=16)
plt.show()
| bsd-2-clause |
kylerbrown/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
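# Worked example for the helper above (numbers chosen for illustration only):
# with 80 inliers out of 100 samples, min_samples=2 and the default
# stop_probability of 0.99, the inlier ratio is 0.8, so
#
#     _dynamic_max_trials(80, 100, 2, 0.99)
#     == abs(ceil(log(0.01) / log(1 - 0.8 ** 2))) == ceil(4.507...) == 5.0
#
# i.e. five random draws give at least a 99% chance of one all-inlier subset.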
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
msultan/msmbuilder | msmbuilder/tests/test_decomposition.py | 3 | 6992 | from __future__ import absolute_import
import numpy as np
from mdtraj.testing import eq
from numpy.testing import assert_approx_equal
from numpy.testing import assert_array_almost_equal
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA as PCAr
from msmbuilder.example_datasets import AlanineDipeptide
from ..cluster import KCenters
from ..decomposition import (FactorAnalysis, FastICA, KernelTICA,
MiniBatchSparsePCA, PCA, SparsePCA, tICA)
from ..decomposition.kernel_approximation import LandmarkNystroem
from ..featurizer import DihedralFeaturizer
random = np.random.RandomState(42)
trajs = [random.randn(10, 3) for _ in range(5)]
def test_tica_fit_transform():
X = random.randn(10, 3)
tica = tICA(n_components=2, lag_time=1)
y2 = tica.fit_transform([np.copy(X)])[0]
def test_tica_singular_1():
tica = tICA(n_components=1)
# make some data that has one column repeated twice
X = random.randn(100, 2)
X = np.hstack((X, X[:, 0, np.newaxis]))
tica.fit([X])
assert tica.components_.dtype == np.float64
assert tica.eigenvalues_.dtype == np.float64
def test_tica_singular_2():
tica = tICA(n_components=1)
# make some data that has one column of all zeros
X = random.randn(100, 2)
X = np.hstack((X, np.zeros((100, 1))))
tica.fit([X])
assert tica.components_.dtype == np.float64
assert tica.eigenvalues_.dtype == np.float64
def test_tica_shape():
model = tICA(n_components=3).fit([random.randn(100, 10)])
eq(model.eigenvalues_.shape, (3,))
eq(model.eigenvectors_.shape, (10, 3))
eq(model.components_.shape, (3, 10))
def test_tica_score_1():
X = random.randn(100, 5)
for n in range(1, 5):
tica = tICA(n_components=n, shrinkage=0)
tica.fit([X])
assert_approx_equal(
tica.score([X]),
tica.eigenvalues_.sum())
assert_approx_equal(tica.score([X]), tica.score_)
def test_tica_score_2():
X = random.randn(100, 5)
Y = random.randn(100, 5)
model = tICA(shrinkage=0.0, n_components=2).fit([X])
s1 = model.score([Y])
s2 = tICA(shrinkage=0.0).fit(model.transform([Y])).eigenvalues_.sum()
eq(s1, s2)
def test_tica_multiple_components():
X = random.randn(100, 5)
tica = tICA(n_components=1, shrinkage=0)
tica.fit([X])
Y1 = tica.transform([X])[0]
tica.n_components = 4
Y4 = tica.transform([X])[0]
tica.n_components = 3
Y3 = tica.transform([X])[0]
assert Y1.shape == (100, 1)
assert Y4.shape == (100, 4)
assert Y3.shape == (100, 3)
eq(Y1.flatten(), Y3[:, 0])
eq(Y3, Y4[:, :3])
def test_tica_kinetic_mapping():
X = random.randn(10, 3)
tica1 = tICA(n_components=2, lag_time=1)
tica2 = tICA(n_components=2, lag_time=1, kinetic_mapping=True)
y1 = tica1.fit_transform([np.copy(X)])[0]
y2 = tica2.fit_transform([np.copy(X)])[0]
assert eq(y2, y1 * tica1.eigenvalues_)
def test_tica_commute_mapping():
X = random.randn(10, 3)
tica1 = tICA(n_components=2, lag_time=1)
tica2 = tICA(n_components=2, lag_time=1, commute_mapping=True)
y1 = tica1.fit_transform([np.copy(X)])[0]
y2 = tica2.fit_transform([np.copy(X)])[0]
regularized_timescales = 0.5 * tica2.timescales_ * \
np.tanh(np.pi * ((tica2.timescales_ - tica2.lag_time) / tica2.lag_time) + 1)
assert eq(y2, np.nan_to_num(y1 * np.sqrt(regularized_timescales / 2)))
def test_pca_vs_sklearn():
# Compare msmbuilder.pca with sklearn.decomposition
pcar = PCAr()
pcar.fit(np.concatenate(trajs))
pca = PCA()
pca.fit(trajs)
y_ref1 = pcar.transform(trajs[0])
y1 = pca.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
np.testing.assert_array_almost_equal(pca.components_, pcar.components_)
np.testing.assert_array_almost_equal(pca.explained_variance_,
pcar.explained_variance_)
np.testing.assert_array_almost_equal(pca.mean_, pcar.mean_)
np.testing.assert_array_almost_equal(pca.n_components_, pcar.n_components_)
np.testing.assert_array_almost_equal(pca.noise_variance_,
pcar.noise_variance_)
def test_pca_pipeline():
# Test that PCA it works in a msmbuilder pipeline
p = Pipeline([('pca', PCA()), ('cluster', KCenters())])
p.fit(trajs)
def test_pca_generator():
# Check to see if it works with a generator
traj_dict = dict((i, t) for i, t in enumerate(trajs))
pcar = PCAr()
pcar.fit(np.concatenate(trajs))
pca = PCA()
# on python 3, dict.values() returns a generator
pca.fit(traj_dict.values())
y_ref1 = pcar.transform(trajs[0])
y1 = pca.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
np.testing.assert_array_almost_equal(pca.components_, pcar.components_)
np.testing.assert_array_almost_equal(pca.explained_variance_,
pcar.explained_variance_)
np.testing.assert_array_almost_equal(pca.mean_, pcar.mean_)
np.testing.assert_array_almost_equal(pca.n_components_, pcar.n_components_)
np.testing.assert_array_almost_equal(pca.noise_variance_,
pcar.noise_variance_)
def test_sparsepca():
pca = SparsePCA()
pca.fit_transform(trajs)
pca.summarize()
def test_minibatchsparsepca():
pca = MiniBatchSparsePCA()
pca.fit_transform(trajs)
pca.summarize()
def test_fastica():
ica = FastICA()
ica.fit_transform(trajs)
ica.summarize()
def test_factoranalysis():
fa = FactorAnalysis()
fa.fit_transform(trajs)
fa.summarize()
def test_ktica_compare_to_tica():
trajectories = AlanineDipeptide().get_cached().trajectories
featurizer = DihedralFeaturizer(sincos=True)
features = featurizer.transform(trajectories[0:1])
features = [features[0][::10]]
tica = tICA(lag_time=1, n_components=2)
ktica = KernelTICA(lag_time=1, kernel='linear', n_components=2,
random_state=42)
tica_out = tica.fit_transform(features)[0]
ktica_out = ktica.fit_transform(features)[0]
assert_array_almost_equal(ktica_out, tica_out, decimal=1)
def test_ktica_compare_to_pipeline():
X = random.randn(100, 5)
ktica = KernelTICA(kernel='rbf', lag_time=5, n_components=1,
random_state=42)
y1 = ktica.fit_transform([X])[0]
u = np.arange(X.shape[0])[5::1]
v = np.arange(X.shape[0])[::1][:u.shape[0]]
lndmrks = X[np.unique((u, v))]
assert_array_almost_equal(lndmrks, ktica.landmarks, decimal=3)
nystroem = LandmarkNystroem(kernel='rbf', landmarks=lndmrks,
random_state=42)
tica = tICA(lag_time=5, n_components=1)
y2_1 = nystroem.fit_transform([X])
y2_2 = tica.fit_transform(y2_1)[0]
assert_array_almost_equal(y1, y2_2)
| lgpl-2.1 |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Dynamic_Examples/contact/Dry_Contact/Hard_Contact/Frictional_SDOF_With_Tangential_Damping/plot.py | 6 | 1237 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 24})
plt.style.use('grayscale')
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=20
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=20
fig = plt.figure(figsize=(10,10))
# Go over each feioutput and plot each one.
thefile = "Frictional_SDOF_freeVibration.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(times, disp, linewidth=4)
plt.grid()
plt.minorticks_on()
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
nlalevee/spark | python/pyspark/worker.py | 4 | 10236 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
import socket
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.taskcontext import TaskContext
from pyspark.files import SparkFiles
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasSerializer
from pyspark.sql.types import to_arrow_type
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
# worker can be reused, so do not add the path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
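# Illustrative note (added): chain(f, g) composes g after f, e.g.
# chain(lambda x: x + 1, lambda x: x * 2)(3) evaluates g(f(3)) == 8.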
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_pandas_scalar_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_length(*a):
result = f(*a)
if not hasattr(result, "__len__"):
raise TypeError("Return type of the user-defined functon should be "
"Pandas.Series, but is {}".format(type(result)))
if len(result) != len(a[0]):
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (len(a[0]), len(result)))
return result
return lambda *a: (verify_result_length(*a), arrow_return_type)
def wrap_pandas_group_map_udf(f, return_type):
def wrapped(*series):
import pandas as pd
result = f(pd.concat(series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
arrow_return_types = (to_arrow_type(field.dataType) for field in return_type)
return [(result[result.columns[i]], arrow_type)
for i, arrow_type in enumerate(arrow_return_types)]
return wrapped
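# Illustrative sketch (added): a group-map pandas UDF wrapped by the helper
# above receives one group as a pandas.DataFrame and must return a
# pandas.DataFrame whose columns match the declared schema, e.g.
#
#     def subtract_mean(pdf):
#         return pdf.assign(v=pdf.v - pdf.v.mean())
#
# registered through pandas_udf("id long, v double", <group-map eval type>);
# the exact PandasUDFType constant name depends on the Spark version.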
def read_single_udf(pickleSer, infile, eval_type):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
row_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if row_func is None:
row_func = f
else:
row_func = chain(row_func, f)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_PANDAS_SCALAR_UDF:
return arg_offsets, wrap_pandas_scalar_udf(row_func, return_type)
elif eval_type == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
return arg_offsets, wrap_pandas_group_map_udf(row_func, return_type)
else:
return arg_offsets, wrap_udf(row_func, return_type)
def read_udfs(pickleSer, infile, eval_type):
num_udfs = read_int(infile)
udfs = {}
call_udf = []
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
# Create function like this:
# lambda a: (f0(a0), f1(a1, a2), f2(a3))
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
if eval_type == PythonEvalType.SQL_PANDAS_SCALAR_UDF \
or eval_type == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
timezone = utf8_deserializer.loads(infile)
ser = ArrowStreamPandasSerializer(timezone)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions." +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# initialize global state
taskContext = TaskContext._getOrCreate()
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
num_broadcast_variables = read_int(infile)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
serializer.dump_stream(func(split_index, iterator), outfile)
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
# JVM close the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
exit(-1)
if __name__ == '__main__':
# Read a local port to connect to from stdin
java_port = int(sys.stdin.readline())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", java_port))
sock_file = sock.makefile("rwb", 65536)
main(sock_file, sock_file)
| apache-2.0 |
mcstrother/dicom-sr-qi | unported scripts/magnification.py | 2 | 2713 | """Makes box plots for DAP and exposure
vs magnification. Can change some things
in line to change the data source (bjh vs. slch)
and to decide between graphing DAP and Exposure
"""
import my_utils
import srdata
import csv
import matplotlib
def build_table():
procs = srdata.process_file(my_utils.BJH_XML_FILE, my_utils.BJH_SYNGO_FILES)
procs = procs + srdata.process_file(my_utils.SLCH_XML_FILE, my_utils.SLCH_SYNGO_FILES)
#procs = srdata.process_file(my_utils.SLCH_XML_FILE, my_utils.SLCH_SYNGO_FILES)
dose_lookup = {}
exposure_lookup = {}
DAP_lookup = {}
for proc in procs:
for e in proc.events:
if e.is_valid() and e.Irradiation_Event_Type == "Fluoroscopy":
if not e.iiDiameter in dose_lookup:
dose_lookup[e.iiDiameter] = []
exposure_lookup[e.iiDiameter] = []
DAP_lookup[e.iiDiameter] = []
dose_lookup[e.iiDiameter].append(e.Dose_RP/e.Number_of_Pulses)
exposure_lookup[e.iiDiameter].append(e.Exposure/e.Number_of_Pulses)
DAP_lookup[e.iiDiameter].append(e.Dose_Area_Product/e.Number_of_Pulses)
return (dose_lookup, exposure_lookup, DAP_lookup)
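# Note (added): each returned lookup maps an iiDiameter value to the list of
# per-pulse quantities (dose at the reference point, exposure, DAP) collected
# from every valid fluoroscopy event acquired at that magnification setting.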
def write_csv(lookup):
table = []
for diameter, exposures in lookup.iteritems():
row = [str(diameter)]
row = row + [e for e in exposures]
table.append(row)
table = my_utils.transposed(table)
with open("temp.csv",'wb') as f:
w = csv.writer(f)
w.writerows(table)
import matplotlib.pyplot as plt
def plot(lookup):
data = []
for iiDiameter in sorted(lookup.keys()):
data.append(lookup[iiDiameter])
plt.boxplot(data, sym='')
plt.setp(plt.gca(),'xticklabels',sorted(lookup.keys()))
plt.show()
def setup_DAP_axes():
plt.title("DAP vs. Magnification")
plt.xlabel("iiDiameter")
plt.ylabel("DAP (Gy*m^2)")
def setup_exposure_axes():
plt.title("Exposure vs. Magnification")
plt.xlabel("iiDiameter")
plt.ylabel("Exposure (uAs)")
def main():
dose_lookup,exposure_lookup,DAP_lookup = build_table()
plt.figure(1)
#setup_DAP_axes()
#plot(DAP_lookup)
setup_exposure_axes()
plot(exposure_lookup)
#write_csv(DAP_lookup)
if __name__ == "__main__":
main()
| bsd-2-clause |
joshzarrabi/e-mission-server | emission/tests/analysisTests/intakeTests/TestFilterAccuracy.py | 1 | 5000 | # Standard imports
import unittest
import datetime as pydt
import logging
import pymongo
import json
import bson.json_util as bju
import pandas as pd
# Our imports
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
class TestFilterAccuracy(unittest.TestCase):
def setUp(self):
# We need to access the database directly sometimes in order to
# forcibly insert entries for the tests to pass. But we put the import
# in here to reduce the temptation to use the database directly elsewhere.
import emission.core.get_database as edb
import uuid
self.testUUID = uuid.uuid4()
self.entries = json.load(open("emission/tests/data/smoothing_data/tablet_2015-11-03"),
object_hook=bju.object_hook)
for entry in self.entries:
entry["user_id"] = self.testUUID
edb.get_timeseries_db().save(entry)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
def tearDown(self):
import emission.core.get_database as edb
edb.get_timeseries_db().remove({"user_id": self.testUUID})
edb.get_pipeline_state_db().remove({"user_id": self.testUUID})
def testEmptyCallToPriorDuplicate(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
# Check call to check duplicate with a zero length dataframe
entry = unfiltered_points_df.iloc[5]
self.assertEqual(eaicf.check_prior_duplicate(pd.DataFrame(), 0, entry), False)
def testEmptyCall(self):
# Check call to the entire filter accuracy with a zero length timeseries
import emission.core.get_database as edb
edb.get_timeseries_db().remove({"user_id": self.testUUID})
# We expect that this should not throw
eaicf.filter_accuracy(self.testUUID)
self.assertEqual(len(self.ts.get_data_df("background/location")), 0)
def testCheckPriorDuplicate(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry = unfiltered_points_df.iloc[5]
unfiltered_appended_df = pd.DataFrame([entry] * 5).append(unfiltered_points_df).reset_index()
logging.debug("unfiltered_appended_df = %s" % unfiltered_appended_df[["fmt_time"]].head())
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 0, entry), False)
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 5, entry), True)
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_points_df, 5, entry), False)
def testConvertToFiltered(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry_from_df = unfiltered_points_df.iloc[5]
entry_copy = eaicf.convert_to_filtered(self.ts.get_entry_at_ts("background/location",
"metadata.write_ts",
entry_from_df.metadata_write_ts))
self.assertNotIn("_id", entry_copy)
self.assertEquals(entry_copy["metadata"]["key"], "background/filtered_location")
def testExistingFilteredLocation(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry_from_df = unfiltered_points_df.iloc[5]
self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), False)
entry_copy = self.ts.get_entry_at_ts("background/location", "metadata.write_ts",
entry_from_df.metadata_write_ts)
self.ts.insert(eaicf.convert_to_filtered(entry_copy))
self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), True)
def testFilterAccuracy(self):
unfiltered_points_df = self.ts.get_data_df("background/location", None)
self.assertEqual(len(unfiltered_points_df), 205)
pre_filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(pre_filtered_points_df), 0)
eaicf.filter_accuracy(self.testUUID)
filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(filtered_points_df), 124)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| bsd-3-clause |
clsb/miles | miles/commands.py | 1 | 38839 | """Commands for the user interface."""
__all__ = [name for name in dir() if name.lower().startswith('Command')]
import argparse
try:
import argcomplete
except ImportError:
argcomplete = None
import logging
import sys
from abc import ABCMeta, abstractmethod
from typing import Sequence
import miles.default as default
from miles import (Configuration, Distributions, Milestones, Simulation, bold, colored, load_database, load_distributions, make_database, save_distributions, version) # noqa: E501
class Command(metaclass=ABCMeta):
"""A command."""
name = None # type: str
@abstractmethod
def setup_parser(self, subparsers):
raise NotImplementedError
@abstractmethod
def do(self, args):
raise NotImplementedError
@staticmethod
def add_argument_config(parser):
arg = parser.add_argument('config', type=str,
metavar='CONFIG-FILE', help='name of '
'the configuration file')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
arg.completer = completer(allowednames='cfg')
class Commands:
"""A collection of commands."""
def __init__(self, parser, commands):
self.commands = []
for command in commands:
command.setup_parser(parser)
self.commands.append(command)
def __getitem__(self, command_name):
for command in self.commands:
if command.name == command_name:
return command
def filter_distributions(milestones: Milestones,
milestones_str: Sequence[str],
distributions: Distributions) \
-> Distributions:
"""Select specific distributions."""
if milestones_str:
selected_distributions = Distributions()
for milestone_str in milestones_str:
milestone = milestones.make_from_str(milestone_str)
if milestone in distributions.keys():
selected_distributions[milestone] = distributions[milestone]
return selected_distributions
else:
return distributions
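# Usage sketch (added): with milestone strings as given on the command line,
# filter_distributions(milestones, ['0,1', '2,3'], distributions) returns a
# Distributions object restricted to those two milestones, while an empty or
# None milestones_str returns the input distributions unchanged.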
class CommandRun(Command):
"""Run milestoning simulation"""
name = 'run'
def setup_parser(self, subparsers):
"""Command-line options for exact milestoning."""
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
b = p.add_argument('-i', '--input', type=str, required=True,
metavar='FILE', help='path to '
'file containing initial distributions')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
b.completer = completer(allowednames='dst')
p.add_argument('-s', '--samples', type=int,
default=default.samples_per_milestone_per_iteration,
metavar='MIN-SAMPLES', help='minimum number of '
'trajectory fragments to sample per milestone '
'per iteration (default is %(default)d)')
p.add_argument('-l', '--local-tolerance', type=float,
default=default.local_convergence_tolerance,
help='tolerance for convergence within each milestone '
'(default is %(default)g)')
p.add_argument('-g', '--global-tolerance', type=float,
default=default.global_convergence_tolerance,
help='tolerance for convergence of the '
'iterative process (default is %(default)g)')
p.add_argument('-m', '--max-iterations', type=int,
metavar='MAX-ITERATIONS',
default=default.max_iterations, help='maximum '
'number of iterations (default is '
'%(default)d)')
p.add_argument('-p', '--include-products',
action='store_true', default=False,
help='sample trajectories from product '
'milestones (default is %(default)s)')
p.add_argument('-M', '--mpi-processes', type=int,
required=True, help='number of MPI processes')
def do(self, args):
simulation = Simulation(args.config)
try:
distributions = load_distributions(args.input)
except FileNotFoundError:
logging.error('Unable to find {!r}'.format(args.input))
sys.exit(-1)
# Remove product milestones from the initial distributions.
if args.include_products is False:
for milestone in simulation.milestones.products:
if milestone in distributions.keys():
del distributions[milestone]
from miles.run import run
run(simulation, initial_distributions=distributions,
num_iterations=args.max_iterations,
num_samples=args.samples,
local_tolerance=args.local_tolerance,
global_tolerance=args.global_tolerance,
num_processes=args.mpi_processes)
class CommandSample(Command):
"""Sample trajectory fragments on specific milestones"""
name = 'sample'
def setup_parser(self, subparsers):
"""Command-line options for exact milestoning."""
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
b = p.add_argument('-i', '--input', type=str, required=True,
metavar='FILE', help='path to '
'file containing initial distributions')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
b.completer = completer(allowednames='dst')
p.add_argument('-m', '--milestone', metavar='MILESTONE',
required=True, action='append', help='restrict'
' to specified milestone(s)')
p.add_argument('-s', '--samples', type=int,
default=default.samples_per_milestone_per_iteration,
metavar='MIN-SAMPLES', help='minimum number of '
'trajectory fragments to sample per milestone '
'per iteration (default is %(default)d)')
p.add_argument('-l', '--local-tolerance', type=float,
default=default.local_convergence_tolerance,
help='tolerance for convergence within each milestone '
'(default is %(default)g)')
p.add_argument('-M', '--mpi-processes', type=int,
required=True, help='number of MPI processes')
def do(self, args):
simulation = Simulation(args.config)
try:
initial_distributions = load_distributions(args.input)
except FileNotFoundError:
logging.error('Unable to find {!r}'.format(args.input))
sys.exit(-1)
distributions = filter_distributions(simulation.milestones,
args.milestone,
initial_distributions)
if len(distributions) == 0:
logging.error('{!r} does not contain points for {}.'
.format(args.input,
','.join(args.milestone)))
sys.exit(-1)
from miles.run import run
run(simulation,
initial_distributions=distributions,
num_iterations=1,
num_samples=args.samples,
local_tolerance=args.local_tolerance,
global_tolerance=default.global_convergence_tolerance,
num_processes=args.mpi_processes)
class CommandPlot(Command):
"""Plot results"""
name = 'plot'
def setup_parser(self, subparsers):
"""Command-line options for plotting results."""
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
g = p.add_mutually_exclusive_group(required=True)
g.add_argument('-v', '--voronoi', action='store_true',
help='plot Voronoi tessellation (default is '
'%(default)s)')
g.add_argument('-H', '--histograms', action='store_true',
default=False, help='plot histograms of '
'distributions at each milestone')
p.add_argument('-l', '--labels', action='store_true',
default=False, help='plot milestone indices '
'(default is %(default)s)')
p.add_argument('-n', '--num-bins', required=False,
help='number of bins for histogram (default '
'is %(default)s)')
p.add_argument('-s', '--marker-size', type=int, required=False,
default=default.marker_size, help='set marker size '
'(default is %(default)d)')
b = p.add_argument('-c', '--colors', type=str, required=False,
default=default.colors, help='specify color '
'scheme (default is %(default)s)')
if argcomplete:
import matplotlib.cm as cm
color_maps = list(cm.datad.keys())
completer = argcomplete.completers.ChoicesCompleter
b.completer = completer(color_maps)
p.add_argument('-t', '--title', type=str, required=False,
help='set title')
p.add_argument('-b', '--colorbar-title', type=str,
metavar='CB-TITLE', required=False,
help='set color bar title')
p.add_argument('-m', '--min-value', type=float, required=False,
default=0.0, help='set lower bound for data')
p.add_argument('-M', '--max-value', type=float, required=False,
default=None, help='set upper bound for data')
p.add_argument('-x', '--xlabel', type=str, required=False,
default='$x_1$', help='set label of x axis')
p.add_argument('-y', '--ylabel', type=str, required=False,
default='$x_2$', help='set label of y axis')
a = p.add_argument('-i', '--input', type=str, # required=True,
metavar='FILE', action='append',
help='path(s) to input data file(s)')
if argcomplete:
a.completer = argcomplete.completers.FilesCompleter()
p.add_argument('-o', '--output', type=str, required=False,
metavar='FILE', help='output figure name')
def do(self, args):
simulation = Simulation(args.config, catch_signals=False,
setup_reactants_and_products=False)
if args.title is None and args.input is not None:
args.title = args.input[0]
from miles.plot import plot
plot(simulation, **args.__dict__)
class CommandLong(Command):
"""Run long trajectory simulation"""
name = 'long'
def setup_parser(self, subparsers):
"""Command-line options for long trajectories."""
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
p.add_argument('-i', '--interval', type=int, metavar='N',
default=default.save_interval, required=False,
help='save results to disk every %(metavar)s '
'transitions (default is %(default)d)')
p.add_argument('-m', '--max-trajectories', type=int,
required=False, metavar='MAX-TRAJECTORIES',
default=default.trajectory_max_trajectories,
help='maximum number of transitions to '
'sample (default is %(default)d)')
p.add_argument('-M', '--mpi-processes', type=int,
required=True, help='number of MPI processes')
def do(self, args):
simulation = Simulation(args.config)
from miles.long import long
long(simulation, max_trajectories=args.max_trajectories,
num_processes=args.mpi_processes)
class CommandMkDist(Command):
"""Create distribution file from database"""
name = 'mkdist'
def setup_parser(self, subparsers):
"""Command line options for creation of distribution files."""
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
p.add_argument('-d', '--database', type=str,
metavar='DIRECTORY', required=False,
help='directory where the database is located '
'(default is set to the path specified in the '
'configuration file)')
p.add_argument('-m', '--milestone', metavar='MILESTONE',
action='append', help='restrict to specified '
'milestone(s)')
p.add_argument('-o', '--output', type=str,
metavar='FILE', required=True,
help='name of the file where the distributions '
'will be written')
p.add_argument('-l', '--less-than', type=int, metavar='NUM-POINTS',
required=False, help='maximum number of points that a'
'milestone should have')
p.add_argument('-g', '--greater-than', type=int, metavar='NUM-POINTS',
required=False, help='minimum number of points that a'
'milestone should have')
group = p.add_mutually_exclusive_group(required=False)
group.add_argument('-r', '--reset-velocities',
action='store_true', dest='reset_velocities',
default=True, required=False, help='set initial '
'velocities from Maxwell-Boltzmann distribution '
'(default is %(default)s)')
group.add_argument('-n', '--no-reset-velocities',
action='store_false', dest='reset_velocities',
default=False, required=False, help='do not set '
'initial velocities from Maxwell-Boltzmann '
'distribution (default is %(default)s)')
def do(self, args):
simulation = Simulation(args.config, catch_signals=False,
setup_reactants_and_products=False)
milestones = simulation.milestones
if args.database is not None:
collective_variables = simulation.collective_variables
db = load_database(args.database, collective_variables)
else:
db = simulation.database
ds = db.to_distributions(args.reset_velocities)
new_ds = Distributions()
restricted_milestones = set()
if args.milestone:
for mls in args.milestone:
restricted_milestone = milestones.make_from_str(mls)
restricted_milestones.add(restricted_milestone)
for milestone in ds.keys():
if args.milestone and milestone not in restricted_milestones:
continue
d = ds[milestone]
l = len(d)
if not ((args.greater_than and l < args.greater_than)
or (args.less_than and l > args.less_than)):
new_ds[milestone] = d
print('{} has {} points'.format(milestone, l))
save_distributions(new_ds, args.output)
num_distributions = len(list(new_ds.keys()))
logging.info('Wrote {} distribution(s) to {!r}.'
.format(num_distributions, args.output))
class CommandLsDist(Command):
"""Print information about a set of distributions"""
name = 'lsdist'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
a = p.add_argument('-i', '--input', type=str, required=True,
metavar='FILE', help='path to file '
'containing distributions')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
a.completer = completer(allowednames='dst')
def do(self, args):
try: # Attempt to use reactants and products if there are any.
simulation = Simulation(args.config, catch_signals=False)
except:
simulation = Simulation(args.config, catch_signals=False,
setup_reactants_and_products=False)
milestones = simulation.milestones
try:
distributions = load_distributions(args.input)
except FileNotFoundError:
logging.error('Unable to find {!r}'.format(args.input))
sys.exit(-1)
known_milestones = sorted(distributions.keys())
for milestone in known_milestones:
distribution = distributions[milestone]
msg = ('{} has {} points'
.format(milestone, len(distribution)))
if milestone in milestones.reactants:
print(colored(bold(' '.join([msg, '(reactant)'])), 'red'))
elif milestone in milestones.products:
print(colored(bold(' '.join([msg, '(product)'])), 'green'))
else:
print(msg)
class CommandPath(Command):
"""Compute max-weight path"""
name = 'path'
example = '''
Example
-------
The command
miles path simulation.cfg --reactant 0,1 --product 2,3 --product 3,4 \\
--transition-matrix K.mtx --stationary-vector q.dat
will compute the maximum weight path between the milestone 0,1 (reactant)
and milestones 2,3 or 3,4 (products).
'''
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
epilog=self.example,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description.capitalize())
self.add_argument_config(p)
p.add_argument('-K', '--transition-matrix', type=str,
metavar='FILE', required=True,
help='file name of transition matrix')
p.add_argument('-q', '--stationary-vector', type=str,
metavar='FILE', required=True,
help='file name of stationary vector')
p.add_argument('-r', '--reactant', metavar='MILESTONE',
action='append', help='use specified milestone(s) '
'as reactant')
p.add_argument('-p', '--product', metavar='MILESTONE',
action='append', help='use specified milestone(s) '
'as product')
p.add_argument('-o', '--output', metavar='FILE', type=str,
default='path.dat', help='file name where '
'to save the maximum weight path (default '
'is %(default)s)')
def do(self, args):
import scipy.io
import numpy as np
from miles.max_weight_path import max_weight_path
simulation = Simulation(args.config, catch_signals=False)
K = scipy.io.mmread(args.transition_matrix).tocoo()
q = np.loadtxt(args.stationary_vector)
milestones = simulation.milestones
if args.reactant:
reactant_indices = {milestones.make_from_str(a).index
for a in args.reactant}
else:
reactant_indices = {m.index for m in milestones.reactants}
if args.product:
product_indices = {milestones.make_from_str(a).index
for a in args.product}
else:
product_indices = {m.index for m in milestones.products}
print('Reactant milestones:')
for idx in reactant_indices:
print(' {}'.format(milestones.make_from_index(idx)))
print('Product milestones:')
for idx in product_indices:
print(' {}'.format(milestones.make_from_index(idx)))
path = max_weight_path(K, q, reactant_indices, product_indices)
np.savetxt(args.output, path)
print('Maximum weight path written to {!r}'.format(args.output))
class CommandCite(Command):
"""Obtain citations of relevant papers"""
name = 'cite'
def setup_parser(self, subparsers):
description = self.__doc__
subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
def do(self, args):
bibtex_citation = bold('The exact milestoning algorithm is '
'described in:') + r"""
@article{Bello-Rivas2015,
author = {Bello-Rivas, J. M. and Elber, R.},
doi = {10.1063/1.4913399},
issn = {0021-9606},
journal = {The Journal of Chemical Physics},
month = {mar},
number = {9},
pages = {094102},
title = {{Exact milestoning}},
url = {https://doi.org/10.1063/1.4913399},
volume = {142},
year = {2015}
}
""" + \
bold('The algorithm for the computation of global '
'max-weight paths is described in:') + r"""
@article{Viswanath2013,
author = {Viswanath, S. and Kreuzer, S. M. and
Cardenas, A. E. and Elber, R.},
doi = {10.1063/1.4827495},
issn = {00219606},
journal = {The Journal of Chemical Physics},
month = {nov},
number = {17},
pages = {174105},
title = {{Analyzing milestoning networks for molecular kinetics:
definitions, algorithms, and examples}},
url = {https://doi.org/10.1063/1.4827495},
volume = {139},
year = {2013}
}"""
print(bibtex_citation)
DISCLAIMER = ("""{} the stationary distributions obtained by the {}
and {} commands are only valid for databases generated by the {}
command.""".format(bold('Warning:'), bold('analyze'),
bold('resample'), bold('long')))
class CommandAnalyze(Command):
"""Analyze results and generate output files"""
name = 'analyze'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
epilog=DISCLAIMER,
description=description.capitalize())
self.add_argument_config(p)
p.add_argument('-o', '--output', metavar='FILE',
type=str, default='stationary-analyze.dst',
help='file name where to save the stationary '
'distributions (default is %(default)s)')
p.add_argument('-K', '--transition-matrix', type=str,
metavar='FILE', default='K.mtx',
help='file name of the transition matrix '
'(default is %(default)s)')
p.add_argument('-T', '--lag-time-matrix', type=str,
metavar='FILE', default='T.mtx',
help='file name of the lag time matrix '
'(default is %(default)s)')
p.add_argument('-q', '--stationary-flux', type=str,
metavar='FILE', default='q.dat',
help='file name of the stationary flux vector '
'(default is %(default)s)')
p.add_argument('-t', '--local-mfpts', type=str,
metavar='FILE', default='t.dat',
help='file name of the vector of local MFPTs '
'(default is %(default)s)')
p.add_argument('-p', '--stationary-probability', type=str,
metavar='FILE', default='p.dat',
help='file name of the stationary probability vector '
'(default is %(default)s)')
def do(self, args):
simulation = Simulation(args.config, catch_signals=False)
from miles.analyze import TransitionKernel, analyze
kernel = TransitionKernel(simulation.database)
mfpt = analyze(kernel, args.output, args.transition_matrix,
args.lag_time_matrix, args.stationary_flux,
args.local_mfpts, args.stationary_probability)
logging.info('Mean first passage time: {:.4f} units of time.'
.format(mfpt))
class CommandCommittor(Command):
"""Compute the committor function"""
name = 'committor'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
p.add_argument('-K', '--transition-matrix', type=str,
metavar='FILE', required=True,
help='file name of the transition matrix')
p.add_argument('-o', '--output', metavar='FILE', type=str,
default='committor.dat', help='file name where '
'to save the committor vector (default '
'is %(default)s)')
def do(self, args):
import scipy.io
import numpy as np
from miles.committor import committor
simulation = Simulation(args.config, catch_signals=False)
K = scipy.io.mmread(args.transition_matrix).tocsr()
milestones = simulation.milestones
reactant_indices = {m.index for m in milestones.reactants}
product_indices = {m.index for m in milestones.products}
committor_vector = committor(K, reactant_indices, product_indices)
np.savetxt(args.output, committor_vector)
print('Committor function written to {!r}'.format(args.output))
class CommandResample(Command):
"""Resample database and analyze results"""
name = 'resample'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
epilog=DISCLAIMER,
description=description.capitalize())
self.add_argument_config(p)
p.add_argument('-s', '--samples', type=int,
default=10*default.samples_per_milestone_per_iteration,
metavar='NUM-SAMPLES', help='number of '
'trajectory fragments per milestone to '
'sample (default is %(default)d)')
p.add_argument('-d', '--database', metavar='DIRECTORY',
type=str, default='database-resample',
help='output database directory name '
'(default is %(default)s)')
p.add_argument('-o', '--output', metavar='FILE', type=str,
default='stationary-resample.dst', help='file '
'name where to save the resampled stationary '
'distributions (default is %(default)s)')
p.add_argument('-K', '--transition-matrix', type=str,
metavar='FILE', default='K-resample.mtx',
help='file name of the transition matrix '
'(default is %(default)s)')
p.add_argument('-T', '--lag-time-matrix', type=str,
metavar='FILE', default='T-resample.mtx',
help='file name of the lag time matrix '
'(default is %(default)s)')
p.add_argument('-q', '--stationary-flux', type=str,
metavar='FILE', default='q-resample.dat',
help='file name of the stationary flux vector '
'(default is %(default)s)')
p.add_argument('-t', '--local-mfpts', type=str,
metavar='FILE', default='t-resample.dat',
help='file name of the vector of local MFPTs '
'(default is %(default)s)')
p.add_argument('-p', '--stationary-probability', type=str,
metavar='FILE', default='p.dat',
help='file name of the stationary probability vector '
'(default is %(default)s)')
def do(self, args):
simulation = Simulation(args.config, catch_signals=False)
from miles.resample import resample
resample(simulation, args.samples, args.database, args.output,
args.transition_matrix, args.lag_time_matrix,
args.stationary_flux, args.local_mfpts,
args.stationary_probability)
class CommandVersion(Command):
"""Display program's version"""
name = 'version'
def setup_parser(self, subparsers):
description = self.__doc__
subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
def do(self, args):
print(bold(version.v_gnu))
class CommandReset(Command):
"""Reset velocities in a distribution"""
name = 'reset'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
a = p.add_argument('-i', '--input', type=str, required=True,
metavar='FILE', help='path to '
'file containing distributions')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
a.completer = completer(allowednames='dst')
p.add_argument('-o', '--output', type=str, required=True,
metavar='FILE', help='path to file '
'where to save the transformed distributions')
def _reset_velocities(self, ds):
pass
def do(self, args):
try:
distributions = load_distributions(args.input)
except FileNotFoundError:
logging.error('Unable to find {!r}'.format(args.input))
sys.exit(-1)
self._reset_velocities(distributions)
save_distributions(distributions, args.output)
class CommandMPI(Command):
"""Start MPI services
"""
name = 'mpi'
def setup_parser(self, subparsers):
description = self.__doc__.strip()
subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
def do(self, args):
from miles.timestepper_service import timestepper_service
timestepper_service()
class CommandPrepare(Command):
"""Sample first hitting points
"""
name = 'prepare'
_suffixes = ('coor', 'vel', 'xsc', 'dcd', 'dvd', 'dcdvel')
def setup_parser(self, subparsers):
description = self.__doc__.strip()
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
a = p.add_argument('-i', '--input', type=str, required=True,
metavar='FILE', help='path to file '
'containing initial results for the MD program')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
a.completer = completer(allowednames=self._suffixes)
p.add_argument('-m', '--milestone', metavar='MILESTONE',
required=False, help='restrict to specified milestone')
p.add_argument('--start', type=int, required=False,
metavar='NUM-CROSSINGS', default=0,
help='number of milestone crossings to obtain before '
'starting to save to database (default is %(default)s)')
p.add_argument('--stop', type=int, required=True,
metavar='NUM-CROSSINGS', help='maximum number of '
'milestone crossings to obtain')
p.add_argument('--step', type=int, required=False,
metavar='NUM-CROSSINGS', default=1,
help='number of milestone crossings to skip (default '
'is %(default)s)')
def do(self, args):
simulation = Simulation(args.config, catch_signals=True,
setup_reactants_and_products=False)
from miles.prepare import prepare
if args.milestone:
milestone = simulation.milestones.make_from_str(args.milestone)
else:
milestone = None
prepare(simulation, milestone, args.input, args.start,
args.stop, args.step)
class CommandPlay(Command):
"""Find hitting points in trajectory files
"""
name = 'play'
_suffixes = ('coor', 'vel', 'xsc', 'dcd', 'dvd', 'dcdvel')
def setup_parser(self, subparsers):
description = self.__doc__.strip()
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
a = p.add_argument('-i', '--input', type=str, required=True,
metavar='FILE', help='path to file '
'containing initial results for the MD program')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
a.completer = completer(allowednames=self._suffixes)
p.add_argument('-m', '--milestone', metavar='MILESTONE',
required=False, help='restrict to specified milestone')
p.add_argument('-c', '--crossings', required=False, default=False,
action='store_true', help='include same-milestone '
'crossings in addition to transitions between '
'neighboring milestones (default is %(default)s)')
p.add_argument('--start', type=int, required=False,
metavar='NUM-CROSSINGS', default=0,
help='number of milestone crossings to obtain before '
'starting to save to database (default is %(default)s)')
p.add_argument('--stop', type=int, required=False,
default=float('inf'), metavar='NUM-CROSSINGS',
help='maximum number of milestone crossings to obtain'
' (default is %(default)s)')
p.add_argument('--step', type=int, required=False,
metavar='NUM-CROSSINGS', default=1,
help='number of milestone crossings to skip (default '
'is %(default)s)')
def do(self, args):
simulation = Simulation(args.config, catch_signals=True,
setup_reactants_and_products=False)
from miles.play import play
if args.milestone:
milestone = simulation.milestones.make_from_str(args.milestone)
else:
milestone = None
play(simulation, milestone, args.input, args.crossings,
args.start, args.stop, args.step)
class CommandMkdb(Command):
"""Create database of first hitting points"""
name = 'mkdb'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
a = p.add_argument('-a', '--anchors', type=str, required=True,
metavar='FILE', help='path to file '
'containing anchors in CSV format')
t = p.add_argument('-t', '--transitions', type=str,
metavar='FILE', help='path to file '
'containing transitions in CSV format')
if argcomplete:
completer = argcomplete.completers.FilesCompleter
a.completer = completer(allowednames='csv')
t.completer = completer(allowednames='csv')
def do(self, args):
config = Configuration()
config.parse(args.config)
from miles import ColvarsParser
colvars_parser = ColvarsParser(config.colvars_file)
print('Using collective variables defined in {!r}.'
.format(config.colvars_file))
print('Please verify that the collective variables shown '
'below are correct.\n')
print(colvars_parser)
collective_variables = colvars_parser.collective_variables
database = make_database(config.database_file, collective_variables)
database.insert_anchors_from_csv(args.anchors)
if args.transitions is not None:
database.insert_transitions_from_csv(args.transitions)
class CommandFsck(Command):
"""Verify database integrity"""
name = 'fsck'
def setup_parser(self, subparsers):
description = self.__doc__
p = subparsers.add_parser(self.name, help=description.lower(),
description=description.capitalize())
self.add_argument_config(p)
def do(self, args):
simulation = Simulation(args.config, catch_signals=False)
from miles.fsck import fsck
status = fsck(simulation)
sys.exit(status)
command_list = [CommandMkdb(),
CommandMkDist(),
CommandLsDist(),
CommandPrepare(),
CommandPlay(),
CommandMPI(),
CommandRun(),
CommandPlot(),
CommandLong(),
CommandAnalyze(),
CommandCommittor(),
CommandPath(),
CommandFsck(),
CommandVersion(),
CommandCite(),
# CommandResample(),
# CommandReset(),
# CommandSample(),
]
| mit |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
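# Usage sketch (added; the dataset name and root path are placeholders):
#
#     from sklearn.datasets.mlcomp import load_mlcomp
#     news_train = load_mlcomp('20news-18828', 'train',
#                              mlcomp_root='/path/to/mlcomp_datasets_home')
#     print(news_train.filenames[:3], news_train.target_names)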
| mit |
sniemi/SamPy | focus/FocusModel.py | 1 | 20065 | """
User Interface for Focus Monitor
"""
import glob
import string, time, datetime
from math import *
from matplotlib import dates
from matplotlib.dates import MinuteLocator, DateFormatter
from Tkinter import *
import tkMessageBox
import numpy as N
import pylab as P
def modelled(camera, date, startTime, stopTime):
"""
Date as a string in month/day/year form, e.g. '07/31/2006'. Times as 24-hour clock strings such as '15:33'.
"""
print 'Modelled', camera, date, startTime, stopTime
thermal = "/Users/niemi/Desktop/temp/hst/OTA/thermal/"
# Focus model offsets
camConst = {'PC': 261.1, 'HRC': 261.0, 'WFC1': 259.7, 'WFC2': 260.35}
secMove = {'2004.12.22': 4.16, '2006.07.31': 5.34}
# Define data lists
julian = []
temp1 = []
temp2 = []
temp3 = []
temp4 = []
temp5 = []
temp6 = []
hours = []
focusDate = time.strptime(date, '%m/%d/%Y')
timeAxis = []
year = focusDate[0]
month = focusDate[1]
day = focusDate[2]
# Get date-dependent focus adjustment
focusShift = 0.0
dateStamp = '%4d.%02d.%02d' % (year, month, day)
for k in secMove.keys():
if dateStamp > k:
focusShift = focusShift + secMove[k]
print 'Secondary mirror move ', focusShift
dayOfYear = focusDate[7]
dayString = "%03d" % dayOfYear
yearString = str(year)
start = string.split(startTime, ':')
stop = string.split(stopTime, ':')
startHour = int(start[0])
startMinute = int(start[1])
stopHour = int(stop[0])
stopMinute = int(stop[1])
jday = toJulian(year, month, day)
jstart = jday + (startHour + startMinute / 60.0) / 24.0 - 40.0 / (60.0 * 24.0) # 40 minute backtrack
jstop = jday + (stopHour + stopMinute / 60.0) / 24.0
fileName = 'thermalData' + yearString + '.dat'
#if not(os.access(thermal + fileName, os.F_OK)): # then check Chris Long's file
f = open(thermal + fileName, 'r')
while f:
line = f.readline()
if line == '': break
columns = string.split(line)
timeStamp = columns[0]
jul = float(columns[1])
if jstart <= jul <= jstop:
julian.append(jul)
tup = fromJulian(jul)
hr = tup[3] + (tup[4] + tup[5] / 60.0) / 60.0 # Extract hours
hours.append(hr)
tobj = datetime.datetime(tup[0], tup[1], tup[2], tup[3], tup[4], tup[5])
timeAxis.append(tobj)
num = dates.date2num(tobj)
temp1.append(float(columns[2]))
temp2.append(float(columns[3]))
temp3.append(float(columns[4]))
temp4.append(float(columns[5]))
temp5.append(float(columns[6]))
temp6.append(float(columns[7]))
if day > dayOfYear: break
f.close()
if len(temp1) == 0: # No temperature data in time range - Check Chris Long file
longFile = glob.glob(thermal + '/breathing2009/BreathingData' + '*') # will produce a list
if len(longFile) > 0:
longFile.sort()
print 'Using ', longFile[-1] # Use latest version
f = open(longFile[-1], 'r')
while f:
line = f.readline()
if line == '': break
columns = string.split(line)
timeStamp = columns[0]
jul = float(columns[1])
if jstart <= jul <= jstop:
julian.append(jul)
tup = fromJulian(jul)
hr = tup[3] + (tup[4] + tup[5] / 60.0) / 60.0 # Extract hours
hours.append(hr)
tobj = datetime.datetime(tup[0], tup[1], tup[2], tup[3], tup[4], tup[5])
timeAxis.append(tobj)
num = dates.date2num(tobj)
temp1.append(float(columns[2]))
temp2.append(float(columns[3]))
temp3.append(float(columns[4]))
temp4.append(float(columns[5]))
temp5.append(float(columns[6]))
temp6.append(float(columns[7]))
if day > dayOfYear: break
f.close()
else:
print 'Did not find Chris Long file'
print 'No matching thermal data file'
gui.statusBar.config(text='No matching thermal data file')
return
jtime = N.array(julian)
aftLS = N.array(temp1)
trussAxial = N.array(temp2)
trussDiam = N.array(temp3)
aftShroud = N.array(temp4)
fwdShell = N.array(temp5)
lightShield = N.array(temp6)
#tBreath is value of light shield temp minus average of previous eight values
tBreath = lightShield.copy() # Make a real copy
l = N.size(tBreath)
if l < 10:
print 'No temperature data'
gui.statusBar.config(text='No temperature data')
return
r1 = range(8)
tBreath[r1] = 0.0
r2 = range(8, l)
for r in r2:
tBreath[r] = 0.7 * (lightShield[r] - sum(lightShield[r - 8:r]) / 8.0)
focusModel = camConst[camera] + focusShift\
- 0.0052 * jtime + 0.48 * aftLS + 0.81 * trussAxial - 0.28 * aftShroud + 0.18 * fwdShell + 0.55 * tBreath
print 'Average model%10.2f' % (N.mean(focusModel[8:]))
# Just the Bely term
Bely = 0.55 * tBreath
bShift = N.mean(focusModel) - N.mean(Bely)
Bely = Bely + bShift
# Time independent Focus model with mean zero offset
flatModel = camConst[camera] + focusShift\
+ 0.48 * aftLS + 0.81 * trussAxial - 0.28 * aftShroud + 0.18 * fwdShell + 0.55 * tBreath - 281.64
print 'Flat model%10.2f' % (N.mean(flatModel[8:]))
# Make up an output file
op = open('plotdata' + dateStamp + '.txt', 'w')
op.write('Julian Date Date Time Model Flat Model\n')
for r in range(8, l):
dataString1 = '%12.6f' % jtime[r]
dataString2 = timeAxis[r].strftime(' %b %d %Y %H:%M:%S')
dataString3 = '%8.4f %8.4f \n' %\
(focusModel[r], flatModel[r])
op.write(dataString1 + dataString2 + dataString3)
t = timeAxis[r]
#print t.strftime('%b %d %Y %H:%M:%S')
op.close()
# Set up for plots
P.figure(1)
P.clf()
P.subplot(211)
P.ylabel('Degrees C')
P.title('Temperatures ' + date)
P.plot(dates.date2num(timeAxis), temp3)
P.plot(dates.date2num(timeAxis), temp4)
P.plot(dates.date2num(timeAxis), temp5)
ax = P.gca()
ax.xaxis.set_major_locator(MinuteLocator((0, 20, 40)))
ax.xaxis.set_major_formatter((DateFormatter('%H:%M')))
P.legend(('Truss Dia', 'Aft Shr', 'Fwd Sh'), loc='upper left')
P.grid(True)
P.subplot(212)
P.plot(dates.date2num(timeAxis), temp1)
P.plot(dates.date2num(timeAxis), temp6)
P.legend(('Aft LS', 'Light Sh'))
P.xlabel('Time')
P.ylabel('Degrees C')
ax2 = P.gca()
ax2.xaxis.set_major_locator(MinuteLocator((0, 20, 40)))
ax2.xaxis.set_major_formatter((DateFormatter('%H:%M')))
P.grid(True)
P.figure(2)
#P.clf()
P.plot(hours[8:], focusModel[8:], '-ro')
#P.plot(hours[8:], Bely[8:], '-g+')
P.title('Model ' + date)
P.xlabel('Time')
P.ylabel('microns')
#print gui.display
if gui.display == 'Comparison': P.legend(('Measured', 'Model'), loc='upper right')
P.grid(True)
P.draw()
return
def measured(camera, testDate):
"""
Extract focus measurements
"""
print 'MEASURED'
fDate = []
fJulian = []
fActual = []
dateList = []
measure = open(camera + 'FocusHistory.txt', 'r')
focusData = measure.readlines()
measure.close()
for k in range(10): print focusData[k] #Temporary test
count = len(focusData)
print count, 'lines in History file'
for l in range(2, count):
pieces = string.split(focusData[l])
if len(pieces) > 0:
dataSet = pieces[0]
if dataSet != '0':
fDate.append(pieces[2])
fJulian.append(pieces[3])
fActual.append(pieces[4])
entries = len(fDate)
print entries, ' entries'
plotJulian = []
plotTime = []
plotFocus = []
for e in range(entries):
if fDate[e] == testDate:
t = float(fJulian[e])
h = 24.0 * (t - floor(t))
plotJulian.append(t)
plotTime.append(h)
plotFocus.append(float(fActual[e]))
meanF = sum(plotFocus) / len(plotFocus)
print 'Average measurement%10.2f' % (meanF)
# Reformat date
pDate = time.strptime(testDate, '%m/%d/%y')
dateText = time.asctime(pDate)
datePieces = string.split(dateText)
plotDate = datePieces[1] + ' ' + datePieces[2] + ' ' + datePieces[4]
P.figure(2)
P.clf()
P.plot(plotTime, plotFocus, '-bo')
P.grid(True)
P.title(camera + ' Focus measurement ' + plotDate)
P.xlabel('Time')
P.ylabel('Microns')
P.draw()
return (plotJulian[0], plotJulian[-1])
def toJulian(year, month, day):
"""
Use time functions
"""
dateString = str(year) + ' ' + str(month) + ' ' + str(day) + ' UTC'
tup = time.strptime(dateString, '%Y %m %d %Z')
sec = time.mktime(tup)
days = (sec - time.timezone) / 86400.0 # Cancel time zone correction
    jday = days + 40587 # 40587 = Modified Julian Date of 1970-01-01 (Unix epoch)
return jday
def fromJulian(j):
    days = j - 40587 # days since 1970-01-01 (Unix epoch)
sec = days * 86400.0
tup = time.gmtime(sec)
return tup
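# Editor's sketch (not part of the original module): toJulian/fromJulian convert
# between calendar dates and Modified Julian Date (days since 1858-11-17); the
# constant 40587 is the MJD of the Unix epoch, 1970-01-01. Illustrative round trip
# (platform time-zone handling permitting):
#
#     mjd = toJulian(2009, 1, 15)
#     fromJulian(mjd)[:3]   # -> (2009, 1, 15)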
def comparison(camera, date):
monthName = ['blank', 'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
times = measured(camera, date) # times now contains start and end Julian times
t0 = times[0]
tp0 = fromJulian(t0)
h0 = 24.0 * (t0 - int(t0))
ih0 = int(h0)
m0 = int(60 * (h0 - int(h0)))
t1 = times[1]
tp1 = fromJulian(t1)
h1 = 24.0 * (t1 - int(t1))
ih1 = int(h1)
m1 = int(60 * (h1 - int(h1)))
dateString = '%2d/%2d/%4d' % (tp1[1], tp1[2], tp1[0])
dateString = string.lstrip(dateString) # clean up leading blanks
    startTime = '%02d:%02d' % (ih0, m0)
    stopTime = '%02d:%02d' % (ih1, m1)
modelled(camera, dateString, startTime, stopTime)
class FocusMenu(object):
def __init__(self):
self.root = None
def CreateForm(self):
self.root = Tk()
self.root.title('HST Focus Monitor')
self.disp = StringVar()
self.disp.set('Modelled')
self.cam = StringVar()
self.cam.set('PC')
self.year = IntVar()
Label(self.root, text='Display').grid(row=1, column=2)
Label(self.root, text='Camera').grid(row=1, column=0, sticky=W)
# Camera choice
self.rc1 = Radiobutton(self.root, text='PC', variable=self.cam, value='PC')
self.rc1.grid(row=3, column=0, sticky=W)
self.rc2 = Radiobutton(self.root, text='HRC', variable=self.cam, value='HRC')
self.rc2.grid(row=4, column=0, sticky=W)
#Display choice
self.rd1 = Radiobutton(self.root, text='Modelled', variable=self.disp, value='Modelled')
self.rd1.grid(row=3, column=2, sticky=W)
self.rd2 = Radiobutton(self.root, text='Measured', variable=self.disp, value='Measured')
self.rd2.grid(row=4, column=2, sticky=W)
self.rd3 = Radiobutton(self.root, text='Comparison', variable=self.disp, value='Comparison')
self.rd3.grid(row=5, column=2, sticky=W)
# Year of model or measurement
Label(self.root, text='Year to display').grid(row=6, column=0, sticky=W)
self.yearEntry = Entry(self.root)
self.yearEntry.grid(row=6, column=1)
self.b1 = Button(self.root, text='Proceed', command=self.GetDate)
self.b1.grid(row=13, column=1)
self.b2 = Button(self.root, text='Exit', command=self.Finish)
self.b2.grid(row=13, column=2)
self.statusBar = Label(self.root, text='', foreground='blue')
self.statusBar.grid(row=11, sticky=S)
def Show(self):
self.root.mainloop()
def GetDate(self):
# Validate year and date input
firstYear = 2003
lastYear = time.gmtime()[0] # Get current year
self.display = self.disp.get()
self.camera = self.cam.get()
self.year = self.yearEntry.get()
goodyear = False
try:
iy = int(self.year)
if iy < firstYear or iy > lastYear:
self.statusBar.config(text='Year must be between 2003 and current year')
else:
goodyear = True
except ValueError:
self.statusBar.config(text='Bad format for Year')
if goodyear: # Show selection for exact date
if self.display == 'Measured' or self.display == 'Comparison':
#print 'Display', self.display
focusFile = self.camera + 'FocusHistory.txt'
print 'Focus File', focusFile # Temporary
measure = open(focusFile, 'r')
focusData = measure.readlines()
lf = len(focusData)
measure.close()
self.dateList = [] # Prepare to collect measurement dates from one year
for l in range(1, lf): #Skip first line
pieces = string.split(focusData[l])
if len(pieces) > 1 and pieces[0] != "0": # Skip blank and meaningless lines
[m, d, y] = string.split(pieces[2], '/')
if int(y) == iy % 100: # Match last two digits of year
dateStamp = pieces[2] # If this is different from previous date, add to list
if len(self.dateList) == 0 or self.dateList[-1] != dateStamp: self.dateList.append(
dateStamp)
# Now make up listbox with dateList
ly = len(self.dateList)
if ly == 0:
nodata = 'No data for ' + self.camera + ' in ' + self.year
self.statusBar.config(text=nodata)
goodyear = False
return
Label(self.root, text='Choose date of measurement').grid(row=7, column=0, sticky=NW)
self.measureDates = Listbox(self.root, height=ly, selectmode=SINGLE)
self.measureDates.grid(row=7, column=1)
self.dateChoice = None # until chosen
for i in range(ly): self.measureDates.insert(END, self.dateList[i])
self.measureDates.bind("<Button1-ButtonRelease>", self.ChooseDate)
self.b1.grid_forget() # Remove PROCEED button
self.rc1.config(state=DISABLED) # Turn off camera and display choices
self.rc2.config(state=DISABLED)
self.rd1.config(state=DISABLED)
self.rd2.config(state=DISABLED)
self.rd3.config(state=DISABLED)
self.yearEntry.config(state=DISABLED)
self.statusBar.config(text='')
Button(self.root, text='Display', command=self.StartGraph).grid(row=8, column=1)
elif self.display == 'Modelled': # Select arbitrary dates and times
#print 'Display',self.display
self.b1.grid_forget() # Remove PROCEED button
self.rc1.config(state=DISABLED) # Turn off camera and display choices
self.rc2.config(state=DISABLED)
self.rd1.config(state=DISABLED)
self.rd2.config(state=DISABLED)
self.rd3.config(state=DISABLED)
self.yearEntry.config(state=DISABLED)
self.statusBar.config(text='')
Label(self.root, text='Date e.g. 11/23').grid(row=8, column=0, sticky=W)
self.dayEntry = Entry(self.root)
self.dayEntry.grid(row=8, column=1)
Label(self.root, text='Start time in form 12:23').grid(row=9, column=0, sticky=W)
self.time1Entry = Entry(self.root)
self.time1Entry.grid(row=9, column=1)
self.time2Entry = Entry(self.root)
self.time2Entry.grid(row=10, column=1)
Label(self.root, text='Use 24 hour clock').grid(row=9, column=2)
Label(self.root, text='Stop time').grid(row=10, column=0, sticky=W)
Button(self.root, text='Display', command=self.GetTimes).grid(row=11, column=1)
def GetTimes(self):
self.day = self.dayEntry.get()
try:
(m, d) = string.split(self.day, '/')
if (1 <= int(m) <= 12) and (1 <= int(d) <= 31):
goodday = True
else:
self.statusBar.config(text='Numerical error in date')
goodday = False
except ValueError:
self.statusBar.config(text='Date to be in form mm/dd')
goodday = False
self.time1 = self.time1Entry.get()
try:
(hour1, minute1) = string.split(self.time1, ':')
if (0 <= int(hour1) <= 23) and (0 <= int(minute1) <= 59):
goodstart = True
else:
self.statusBar.config(text='Numerical error in start time')
goodstart = False
except ValueError:
self.statusBar.config(text='Start time to be in form hh:mm')
goodstart = False
if goodstart: print 'h1m1', hour1, minute1
self.time2 = self.time2Entry.get()
try:
(hour2, minute2) = string.split(self.time2, ':')
if (0 <= int(hour2) <= 23) and (0 <= int(minute2) <= 59):
goodstop = True
else:
self.statusBar.config(text='Numerical error in stop time')
                goodstop = False
except ValueError:
self.statusBar.config(text='Stop time to be in form hh:mm')
goodstop = False
if goodstop: print 'h2m2', hour2, minute2
# Final check
if goodstart and goodstop:
t1 = int(hour1) + int(minute1) / 60.0
t2 = int(hour2) + int(minute2) / 60.0
if t1 > t2:
goodstop = False
                self.statusBar.config(text='Start time is later than stop time')
else:
goodstop = True
if goodday and goodstart and goodstop:
self.statusBar.config(text='Generating graphs')
graph() # Start display
return
def ChooseDate(self, event):
item = self.measureDates.curselection()
self.dateChoice = self.dateList[int(item[0])]
def StartGraph(self):
if self.dateChoice: # Proceed only if date choice has been made
self.statusBar.config(text='Generating graphs')
graph()
#self.statusBar.config(text = 'Finished')
else: self.statusBar.config(text='Choose a date')
def Finish(self):
self.root.destroy()
P.close('all')
def graph():
monthName = ['blank', 'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
print 'Create display'
print 'Camera', gui.camera
print 'Display ', gui.display
if gui.display == 'Modelled':
#print gui.day, gui.year, gui.time1, gui.time2
fulldate = "%5s/%4s" % (gui.day, gui.year)
fulldate = string.lstrip(fulldate)
modelled(gui.camera, fulldate, gui.time1, gui.time2)
elif gui.display == 'Measured':
#print 'Date ', gui.dateChoice
times = measured(gui.camera, gui.dateChoice)
elif gui.display == "Comparison":
#print gui.camera, gui.dateChoice
comparison(gui.camera, gui.dateChoice)
print 'Finished'
#gui.statusBar.config(text = 'Finished')
return
if __name__ == '__main__':
gui = FocusMenu() # Creates an instance of FocusMenu
gui.CreateForm() # Builds the form
gui.Show() # Starts the loop
| bsd-2-clause |
bssrdf/pmtk3 | python/demos/ch04/discrimAnalysisDboundariesDemo.py | 7 | 2629 | #!/usr/bin/env python
import numpy as np
import matplotlib.pylab as pl
from sklearn.lda import LDA
from sklearn.qda import QDA
c = 'bgr'
m = 'xos'
n_samples = 30  # number of samples per class
model_names = ('LDA', 'QDA')
def mvn2d(x, y, u, sigma):
"""calculate the probability of 2d-guss"""
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
sigma_inv = np.linalg.inv(sigma)
z = np.dot((xy - u), sigma_inv)
z = np.sum(z * (xy - u), axis=1)
z = np.exp(-0.5 * z)
z = z / (2 * np.pi * np.linalg.det(sigma) ** 0.5)
return z.reshape(xx.shape)
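# Editor's note: mvn2d evaluates the bivariate normal density
#     N(xy; u, sigma) = exp(-0.5 * (xy - u)^T sigma^-1 (xy - u)) / (2 * pi * sqrt(det(sigma)))
# over the meshgrid built from x and y; below it is only used to overlay the true
# class-conditional density contours on top of the fitted decision regions.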
models = [([[1.5, 1.5], [-1.5, -1.5]], # means
           [np.eye(2)] * 2  # sigmas
), # model 1
([[1.5, 1.5], [-1.5, -1.5]], # means
[[[1.5, 0], [0, 1]], np.eye(2) * 0.7] # sigmas
), # model2
([[0, 0], [0, 5], [5, 5]],
[np.eye(2)] * 3
), # model3
([[0, 0], [0, 5], [5, 5]],
[[[4, 0], [0, 1]], np.eye(2), np.eye(2)]
) # model4
]
for n_th, (u, sigma) in enumerate(models):
# generate random points
x = [] # store sample points
y = [] # store class labels
for i in range(len(u)):
x.append(np.random.multivariate_normal(u[i], sigma[i], n_samples))
y.append([i] * n_samples)
points = np.vstack(x)
labels = np.hstack(y)
x_min, y_min = np.min(points, axis=0)
x_max, y_max = np.max(points, axis=0)
x_range = np.linspace(x_min - 1, x_max + 1, 200)
y_range = np.linspace(y_min - 1, y_max + 1, 200)
xx, yy = np.meshgrid(x_range, y_range)
for k, model in enumerate((LDA(), QDA())):
#fit, predict
clf = model
clf.fit(points, labels)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(200, 200)
z_p = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
#draw areas and boundries
pl.figure()
pl.pcolormesh(xx, yy, z)
pl.cool()
for j in range(len(u)):
pl.contour(xx, yy, z_p[:, j].reshape(200, 200),
[0.5], lw=3, colors='k')
#draw points
for i, point in enumerate(x):
pl.plot(point[:, 0], point[:, 1], c[i] + m[i])
#draw contours
for i in range(len(u)):
prob = mvn2d(x_range, y_range, u[i], sigma[i])
cs = pl.contour(xx, yy, prob, colors=c[i])
        pl.title('Separate {0} classes using {1}'.
format(len(u), model_names[k]))
pl.savefig('discrimAnalysisDboundariesDemo_%d.png' % (n_th * 2 + k))
pl.show()
| mit |
JsNoNo/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
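    # Editor's sketch of a typical invocation (option names are taken from the
    # parser above; the values and script path are illustrative only):
    #
    #     python bench_random_projections.py --n-samples 1000 --n-features 10000 \
    #         --transformers SparseRandomProjection --dense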
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
great-expectations/great_expectations | tests/test_utils.py | 1 | 11980 | import logging
import os
import uuid
from typing import List
import numpy as np
import pandas as pd
import pytest
from great_expectations.data_context.store import CheckpointStore, StoreBackend
from great_expectations.data_context.store.util import (
build_checkpoint_store_using_store_backend,
delete_checkpoint_config_from_store_backend,
delete_config_from_store_backend,
load_checkpoint_config_from_store_backend,
load_config_from_store_backend,
save_checkpoint_config_to_store_backend,
save_config_to_store_backend,
)
from great_expectations.data_context.types.base import BaseYamlConfig, CheckpointConfig
from great_expectations.data_context.util import build_store_from_config
logger = logging.getLogger(__name__)
# Taken from the following stackoverflow:
# https://stackoverflow.com/questions/23549419/assert-that-two-dictionaries-are-almost-equal
def assertDeepAlmostEqual(expected, actual, *args, **kwargs):
"""
Assert that two complex structures have almost equal contents.
Compares lists, dicts and tuples recursively. Checks numeric values
    using pytest.approx and checks all other values with a plain equality assertion.
    Accepts additional positional and keyword arguments and passes those
intact to pytest.approx() (that's how you specify comparison
precision).
"""
is_root = "__trace" not in kwargs
trace = kwargs.pop("__trace", "ROOT")
try:
# if isinstance(expected, (int, float, long, complex)):
if isinstance(expected, (int, float, complex)):
assert expected == pytest.approx(actual, *args, **kwargs)
elif isinstance(expected, (list, tuple, np.ndarray)):
assert len(expected) == len(actual)
for index in range(len(expected)):
v1, v2 = expected[index], actual[index]
assertDeepAlmostEqual(v1, v2, __trace=repr(index), *args, **kwargs)
elif isinstance(expected, dict):
assert set(expected) == set(actual)
for key in expected:
assertDeepAlmostEqual(
expected[key], actual[key], __trace=repr(key), *args, **kwargs
)
else:
assert expected == actual
except AssertionError as exc:
exc.__dict__.setdefault("traces", []).append(trace)
if is_root:
trace = " -> ".join(reversed(exc.traces))
exc = AssertionError("{}\nTRACE: {}".format(str(exc), trace))
raise exc
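# Editor's sketch (illustrative, not part of the original test utilities):
# assertDeepAlmostEqual walks nested containers and compares numeric leaves with
# pytest.approx tolerances, for example:
#
#     expected = {"metrics": [1.0, 2.0], "name": "demo"}
#     actual = {"metrics": [1.0000001, 2.0], "name": "demo"}
#     assertDeepAlmostEqual(expected, actual, rel=1e-3)  # passes
#     assertDeepAlmostEqual(expected, {"metrics": [9, 9], "name": "demo"})  # raises AssertionError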
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except OSError as e:
print(e)
def create_files_for_regex_partitioner(
root_directory_path: str, directory_paths: list = None, test_file_names: list = None
):
if not directory_paths:
return
if not test_file_names:
test_file_names: list = [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
base_directories = []
for dir_path in directory_paths:
if dir_path is None:
base_directories.append(dir_path)
else:
data_dir_path = os.path.join(root_directory_path, dir_path)
os.makedirs(data_dir_path, exist_ok=True)
base_dir = str(data_dir_path)
# Put test files into the directories.
for file_name in test_file_names:
file_path = os.path.join(base_dir, file_name)
with open(file_path, "w") as fp:
fp.writelines([f'The name of this file is: "{file_path}".\n'])
base_directories.append(base_dir)
def create_files_in_directory(
directory: str, file_name_list: List[str], file_content_fn=lambda: "x,y\n1,2\n2,3"
):
subdirectories = []
for file_name in file_name_list:
splits = file_name.split("/")
for i in range(1, len(splits)):
subdirectories.append(os.path.join(*splits[:i]))
subdirectories = set(subdirectories)
for subdirectory in subdirectories:
os.makedirs(os.path.join(directory, subdirectory), exist_ok=True)
for file_name in file_name_list:
file_path = os.path.join(directory, file_name)
with open(file_path, "w") as f_:
f_.write(file_content_fn())
def create_fake_data_frame():
return pd.DataFrame(
{
"x": range(10),
"y": list("ABCDEFGHIJ"),
}
)
def validate_uuid4(uuid_string: str) -> bool:
"""
Validate that a UUID string is in fact a valid uuid4.
Happily, the uuid module does the actual checking for us.
It is vital that the 'version' kwarg be passed
to the UUID() call, otherwise any 32-character
hex string is considered valid.
From https://gist.github.com/ShawnMilo/7777304
Args:
uuid_string: string to check whether it is a valid UUID or not
Returns:
True if uuid_string is a valid UUID or False if not
"""
try:
val = uuid.UUID(uuid_string, version=4)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
# If the uuid_string is a valid hex code,
# but an invalid uuid4,
# the UUID.__init__ will convert it to a
# valid uuid4. This is bad for validation purposes.
return val.hex == uuid_string.replace("-", "")
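# Editor's sketch (illustrative):
#
#     validate_uuid4(str(uuid.uuid4()))  # -> True
#     validate_uuid4("not-a-uuid")       # -> False (the ValueError is caught above)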
def get_sqlite_temp_table_names(engine):
result = engine.execute(
"""
SELECT
name
FROM
sqlite_temp_master
"""
)
rows = result.fetchall()
return {row[0] for row in rows}
def get_sqlite_table_names(engine):
result = engine.execute(
"""
SELECT
name
FROM
sqlite_master
"""
)
rows = result.fetchall()
return {row[0] for row in rows}
def build_in_memory_store_backend(
module_name: str = "great_expectations.data_context.store",
class_name: str = "InMemoryStoreBackend",
**kwargs,
) -> StoreBackend:
logger.debug("Starting data_context/store/util.py#build_in_memory_store_backend")
store_backend_config: dict = {"module_name": module_name, "class_name": class_name}
store_backend_config.update(**kwargs)
return build_store_from_config(
store_config=store_backend_config,
module_name=module_name,
runtime_environment=None,
)
def build_tuple_filesystem_store_backend(
base_directory: str,
*,
module_name: str = "great_expectations.data_context.store",
class_name: str = "TupleFilesystemStoreBackend",
**kwargs,
) -> StoreBackend:
logger.debug(
f"""Starting data_context/store/util.py#build_tuple_filesystem_store_backend using base_directory:
"{base_directory}"""
)
store_backend_config: dict = {
"module_name": module_name,
"class_name": class_name,
"base_directory": base_directory,
}
store_backend_config.update(**kwargs)
return build_store_from_config(
store_config=store_backend_config,
module_name=module_name,
runtime_environment=None,
)
def build_tuple_s3_store_backend(
bucket: str,
*,
module_name: str = "great_expectations.data_context.store",
class_name: str = "TupleS3StoreBackend",
**kwargs,
) -> StoreBackend:
logger.debug(
f"""Starting data_context/store/util.py#build_tuple_s3_store_backend using bucket: {bucket}
"""
)
store_backend_config: dict = {
"module_name": module_name,
"class_name": class_name,
"bucket": bucket,
}
store_backend_config.update(**kwargs)
return build_store_from_config(
store_config=store_backend_config,
module_name=module_name,
runtime_environment=None,
)
def build_checkpoint_store_using_filesystem(
store_name: str,
base_directory: str,
overwrite_existing: bool = False,
) -> CheckpointStore:
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
return build_checkpoint_store_using_store_backend(
store_name=store_name,
store_backend=store_backend_obj,
overwrite_existing=overwrite_existing,
)
def save_checkpoint_config_to_filesystem(
store_name: str,
base_directory: str,
checkpoint_name: str,
checkpoint_configuration: CheckpointConfig,
):
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
save_checkpoint_config_to_store_backend(
store_name=store_name,
store_backend=store_backend_obj,
checkpoint_name=checkpoint_name,
checkpoint_configuration=checkpoint_configuration,
)
def load_checkpoint_config_from_filesystem(
store_name: str,
base_directory: str,
checkpoint_name: str,
) -> CheckpointConfig:
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
return load_checkpoint_config_from_store_backend(
store_name=store_name,
store_backend=store_backend_obj,
checkpoint_name=checkpoint_name,
)
def delete_checkpoint_config_from_filesystem(
store_name: str,
base_directory: str,
checkpoint_name: str,
):
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
delete_checkpoint_config_from_store_backend(
store_name=store_name,
store_backend=store_backend_obj,
checkpoint_name=checkpoint_name,
)
def save_config_to_filesystem(
configuration_store_class_name: str,
configuration_store_module_name: str,
store_name: str,
base_directory: str,
configuration_key: str,
configuration: BaseYamlConfig,
):
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
save_config_to_store_backend(
class_name=configuration_store_class_name,
module_name=configuration_store_module_name,
store_name=store_name,
store_backend=store_backend_obj,
configuration_key=configuration_key,
configuration=configuration,
)
def load_config_from_filesystem(
configuration_store_class_name: str,
configuration_store_module_name: str,
store_name: str,
base_directory: str,
configuration_key: str,
) -> BaseYamlConfig:
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
return load_config_from_store_backend(
class_name=configuration_store_class_name,
module_name=configuration_store_module_name,
store_name=store_name,
store_backend=store_backend_obj,
configuration_key=configuration_key,
)
def delete_config_from_filesystem(
configuration_store_class_name: str,
configuration_store_module_name: str,
store_name: str,
base_directory: str,
configuration_key: str,
):
store_config: dict = {"base_directory": base_directory}
store_backend_obj: StoreBackend = build_tuple_filesystem_store_backend(
**store_config
)
delete_config_from_store_backend(
class_name=configuration_store_class_name,
module_name=configuration_store_module_name,
store_name=store_name,
store_backend=store_backend_obj,
configuration_key=configuration_key,
)
| apache-2.0 |
thekingofkings/chicago-crime | python/housePrice.py | 2 | 1403 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Parse Chicago house price per sqft.
Created on Fri May 19 20:30:20 2017
@author: hxw186
"""
from tract import Tract
import pandas as pd
from shapely.geometry import Point
import pickle
def get_individual_house_price():
houses = pd.read_csv("..//data/house_source.csv", index_col=0)
houses = houses.loc[lambda x : (x["priceSqft"] > 30) & (x["priceSqft"] < 3000), :]
return houses
def retrieve_CA_avg_house_price():
houses = get_individual_house_price()
cas = Tract.createAllCAObjects()
house_cnt = {k:0 for k in cas.keys()}
avg_price = {k:0.0 for k in cas.keys()}
for idx, house in houses.iterrows():
p = Point(house.lon, house.lat)
for k, ca in cas.items():
if ca.polygon.contains(p):
house_cnt[k] += 1
avg_price[k] += house.priceSqft
break
for k in house_cnt.keys():
if house_cnt[k] == 0:
print k
else:
avg_price[k] /= house_cnt[k]
assert avg_price[54] == 0
avg_price[54] = (avg_price[53] + avg_price[55]) / 2
with open("../data/ca-average-house-price.pickle", 'w') as fout:
pickle.dump(avg_price, fout)
pickle.dump(house_cnt, fout)
return avg_price
if __name__ == '__main__':
    avg_price = retrieve_CA_avg_house_price()
| mit |
Carralex/landlab | landlab/plot/imshow.py | 1 | 21335 | #! /usr/bin/env python
"""
Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.plot.imshow.imshow_grid
~landlab.plot.imshow.imshow_grid_at_cell
~landlab.plot.imshow.imshow_grid_at_node
"""
import numpy as np
import inspect
from landlab.field.scalar_data_fields import FieldError
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
from landlab.grid import CLOSED_BOUNDARY
from landlab.grid.raster import RasterModelGrid
from landlab.grid.voronoi import VoronoiDelaunayGrid
from landlab.plot.event_handler import query_grid_on_button_press
from landlab.utils.decorators import deprecated
def imshow_grid_at_node(grid, values, **kwds):
"""Prepare a map view of data over all nodes in the grid.
Data is plotted as cells shaded with the value at the node at its center.
Outer edges of perimeter cells are extrapolated. Closed elements are
colored uniformly (default black, overridden with kwd 'color_for_closed');
other open boundary nodes get their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
Node coordinates are printed when a mouse button is pressed on a cell in
the plot.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid_at_node(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node values, or a field name as a string from which to draw the data.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed nodes (default 'black'). If None, closed
(or masked) nodes will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
if isinstance(values, str):
values_at_node = grid.at_node[values]
else:
values_at_node = values
if values_at_node.size != grid.number_of_nodes:
raise ValueError('number of values does not match number of nodes')
values_at_node = np.ma.masked_where(
grid.status_at_node == CLOSED_BOUNDARY, values_at_node)
try:
shape = grid.shape
except AttributeError:
shape = (-1, )
_imshow_grid_values(grid, values_at_node.reshape(shape), **kwds)
if isinstance(values, str):
plt.title(values)
plt.gcf().canvas.mpl_connect('button_press_event',
lambda event: query_grid_on_button_press(event, grid))
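# Editor's sketch of typical usage (illustrative only; the grid construction and
# field handling shown here are assumptions about the wider landlab API and may
# differ between versions):
#
#     from landlab import RasterModelGrid
#     import numpy as np
#     mg = RasterModelGrid((4, 5))
#     z = np.arange(mg.number_of_nodes, dtype=float)
#     imshow_grid_at_node(mg, z, var_name='elevation', var_units='m',
#                         grid_units=('m', 'm'), cmap='viridis')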
@deprecated(use='imshow_grid_at_node', version='0.5')
def imshow_node_grid(grid, values, **kwds):
imshow_grid_at_node(grid, values, **kwds)
def imshow_grid_at_cell(grid, values, **kwds):
"""Map view of grid data over all grid cells.
Prepares a map view of data over all cells in the grid.
Method can take any of the same ``**kwds`` as :func:`imshow_grid_at_node`.
Construction ::
imshow_grid_at_cell(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Values at the cells on the grid. Alternatively, can be a field name
(string) from which to draw the data from the grid.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
Raises
------
ValueError
If input grid is not uniform rectilinear.
"""
if isinstance(values, str):
try:
values_at_cell = grid.at_cell[values]
except FieldError:
values_at_cell = grid.at_node[values]
else:
values_at_cell = values
if values_at_cell.size == grid.number_of_nodes:
values_at_cell = values_at_cell[grid.node_at_cell]
if values_at_cell.size != grid.number_of_cells:
raise ValueError('number of values must match number of cells or '
'number of nodes')
values_at_cell = np.ma.asarray(values_at_cell)
values_at_cell.mask = True
values_at_cell.mask[grid.core_cells] = False
myimage = _imshow_grid_values(grid,
values_at_cell.reshape(grid.cell_grid_shape),
**kwds)
if isinstance(values, str):
plt.title(values)
return myimage
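# Editor's note (illustrative): as the size check above shows, `values` may be sized
# either per cell or per node; node-sized arrays are reduced with
# values[grid.node_at_cell] before plotting, so both of these calls are valid:
#
#     imshow_grid_at_cell(mg, cell_values)  # cell_values.size == mg.number_of_cells
#     imshow_grid_at_cell(mg, node_values)  # node_values.size == mg.number_of_nodes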
@deprecated(use='imshow_grid_at_cell', version='0.5')
def imshow_cell_grid(grid, values, **kwds):
imshow_grid_at_cell(grid, values, **kwds)
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=(None, None),
symmetric_cbar=False, cmap='pink', limits=None,
colorbar_label = None,
allow_colorbar=True, vmin=None, vmax=None,
norm=None, shrink=1., color_for_closed='black',
color_for_background=None, show_elements=False,
output=None):
gridtypes = inspect.getmro(grid.__class__)
cmap = plt.get_cmap(cmap)
if color_for_closed is not None:
cmap.set_bad(color=color_for_closed)
else:
cmap.set_bad(alpha=0.)
if isinstance(grid, RasterModelGrid):
if values.ndim != 2:
raise ValueError('values must have ndim == 2')
y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5
kwds = dict(cmap=cmap)
(kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
if (limits is None) and ((vmin is None) and (vmax is None)):
if symmetric_cbar:
(var_min, var_max) = (values.min(), values.max())
limit = max(abs(var_min), abs(var_max))
(kwds['vmin'], kwds['vmax']) = (- limit, limit)
elif limits is not None:
(kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
else:
if vmin is not None:
kwds['vmin'] = vmin
if vmax is not None:
kwds['vmax'] = vmax
if np.isclose(grid.dx, grid.dy):
if values.size == grid.number_of_nodes:
myimage = plt.imshow(
values.reshape(grid.shape), origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
else: # this is a cell grid, and has been reshaped already...
myimage = plt.imshow(values, origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
myimage = plt.pcolormesh(x, y, values, **kwds)
plt.gca().set_aspect(1.)
plt.autoscale(tight=True)
if allow_colorbar:
cb = plt.colorbar(norm=norm, shrink=shrink)
if colorbar_label:
cb.set_label(colorbar_label)
elif VoronoiDelaunayGrid in gridtypes:
# This is still very much ad-hoc, and needs prettifying.
# We should save the modifications needed to plot color all the way
# to the diagram edge *into* the grid, for faster plotting.
# (see http://stackoverflow.com/questions/20515554/...
# colorize-voronoi-diagram)
# (This technique is not implemented yet)
from scipy.spatial import voronoi_plot_2d
import matplotlib.colors as colors
import matplotlib.cm as cmx
cm = plt.get_cmap(cmap)
if (limits is None) and ((vmin is None) and (vmax is None)):
# only want to work with NOT CLOSED nodes
open_nodes = grid.status_at_node != 4
if symmetric_cbar:
(var_min, var_max) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
limit = max(abs(var_min), abs(var_max))
(vmin, vmax) = (- limit, limit)
else:
(vmin, vmax) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
elif limits is not None:
(vmin, vmax) = (limits[0], limits[1])
else:
open_nodes = grid.status_at_node != 4
if vmin is None:
vmin = values.flat[open_nodes].min()
if vmax is None:
vmax = values.flat[open_nodes].max()
cNorm = colors.Normalize(vmin, vmax)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
colorVal = scalarMap.to_rgba(values)
if show_elements:
myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
show_points=False)
# show_points to be supported in scipy0.18, but harmless for now
mycolors = (i for i in colorVal)
for order in grid.vor.point_region:
region = grid.vor.regions[order]
colortouse = next(mycolors)
if -1 not in region:
polygon = [grid.vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=colortouse)
plt.gca().set_aspect(1.)
# plt.autoscale(tight=True)
# Tempting though it is to move the boundary outboard of the outermost
# nodes (e.g., to the outermost corners), this is a bad idea, as the
# outermost cells tend to have highly elongated shapes which make the
# plot look stupid
plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))
scalarMap.set_array(values)
if allow_colorbar:
cb = plt.colorbar(scalarMap, shrink=shrink)
if grid_units[1] is None and grid_units[0] is None:
grid_units = grid.axis_units
if grid_units[1] == '-' and grid_units[0] == '-':
plt.xlabel('X')
plt.ylabel('Y')
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
if plot_name is not None:
plt.title('%s' % (plot_name))
if var_name is not None or var_units is not None:
if var_name is not None:
assert type(var_name) is str
if var_units is not None:
assert type(var_units) is str
colorbar_label = var_name + ' (' + var_units + ')'
else:
colorbar_label = var_name
else:
assert type(var_units) is str
colorbar_label = '(' + var_units + ')'
assert type(colorbar_label) is str
assert allow_colorbar
cb.set_label(colorbar_label)
if color_for_background is not None:
plt.gca().set_axis_bgcolor(color_for_background)
if output is not None:
if type(output) is str:
plt.savefig(output)
plt.clf()
elif output:
plt.show()
def imshow_grid(grid, values, **kwds):
"""Prepare a map view of data over all nodes or cells in the grid.
Data is plotted as colored cells. If at='node', the surrounding cell is
shaded with the value at the node at its center. If at='cell', the cell
is shaded with its own value. Outer edges of perimeter cells are
extrapolated. Closed elements are colored uniformly (default black,
overridden with kwd 'color_for_closed'); other open boundary nodes get
their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node or cell values, or a field name as a string from which to draw
the data.
at : str, {'node', 'cell'}
Tells plotter where values are defined.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
show = kwds.pop('show', False)
values_at = kwds.pop('values_at', 'node')
values_at = kwds.pop('at', values_at)
if isinstance(values, str):
values = grid.field_values(values_at, values)
if values_at == 'node':
imshow_grid_at_node(grid, values, **kwds)
elif values_at == 'cell':
imshow_grid_at_cell(grid, values, **kwds)
else:
raise TypeError('value location %s not understood' % values_at)
# retained for backwards compatibility:
if show:
plt.show()
| mit |
crawfordsm/pyspectrograph | ah_bootstrap.py | 16 | 35044 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and the options therein, determines the next step taken: if it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing the submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
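# As a sketch of how these options map onto a project's configuration, a
# setup.cfg using the bootstrapper might contain something like the following
# (values are illustrative only; option names are those in CFG_OPTIONS above):
#
#     [ah_bootstrap]
#     auto_use = True
#     path = astropy_helpers
#     auto_upgrade = False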
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
        # of the same name then we will break that. However there's a catch-22
        # here in that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
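    # For example, an invocation such as ``python setup.py build --offline
    # --no-git`` would leave {'offline': True, 'use_git': False} in the
    # returned config and strip both flags from argv before setup.py's own
    # argument parsing sees them.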
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST", while in other cases it gets a precedence of
        # "EGG_DIST". However, when activating the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
        # Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the resulting Distribution object if the import succeeded, or
        `None` otherwise.
        """
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
            # This is a warning emitted by perl (from running git submodule)
            # which only occurs with a malformed locale setting, something that
            # can happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if the
        # submodule is initialized but is not at the commit currently indicated
        # in .gitmodules (and thus needs to be updated), or 'U' if the
        # submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
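        # As an illustrative example (hash shortened here), a line for an
        # initialized, up-to-date submodule could look like:
        #
        #     " 1234abcd... astropy_helpers (heads/master)"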
_git_submodule_status_re = re.compile(
            r'^(?P<status>[+\-U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
        to determine if the supplied path is a git submodule, and does not
        execute any subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
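        # For reference, a typical .gitmodules entry (contents illustrative)
        # looks like:
        #
        #     [submodule "astropy_helpers"]
        #         path = astropy_helpers
        #         url = https://github.com/astropy/astropy-helpers.git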
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/neural_networks/plot_mlp_training_curves.py | 1 | 4499 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
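# The pltshow() helper above either rasterizes the current figure to a
# temporary PNG and places it on the NodeBox canvas, or falls back to a plain
# plt.show() when the script runs outside NodeBox.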
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
# plt.show()
pltshow(plt)
| mit |
seap-udea/tQuakes | plots/analysis/quake-map.py | 2 | 2786 | # ############################################################
# IMPORT TOOLS
# ############################################################
from tquakes import *
from matplotlib import use
use('Agg')
import matplotlib.pyplot as plt
confile=prepareScript()
conf=execfile(confile)
quake=loadConf("quake.conf")
# ############################################################
# CONNECT TO DATABASE
# ############################################################
connection=connectDatabase()
db=connection.cursor()
# ############################################################
# PREPARE PLOTTING REGION
# ############################################################
fig,axs=subPlots(plt,[1])
# ############################################################
# GET QUAKES
# ############################################################
dl=dlat/10.0
dt=dlon/10.0
latb=center[0]-dlat/2;latu=center[0]+dlat/2
lonl=center[1]-dlon/2;lonr=center[1]+dlon/2
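# The query below is restricted to a dlat x dlon box centred on `center`;
# these variables are expected to come from the configuration scripts loaded
# above via execfile/loadConf.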
search=search+"and (cluster1='0' or cluster1 like '-%%') and qlat+0>=%.2f and qlat+0<%.2f and qlon+0>=%.2f and qlon+0<%.2f limit %d"%(latb,latu,lonl,lonr,limit)
qids,quakes=getQuakes(search,db)
nquakes=len(qids)
# ############################################################
# GET QUAKE MAP
# ############################################################
# ############################################################
# CREATE MAPS
# ############################################################
ms=scatterMap(axs[0],quakes[:,QLAT],quakes[:,QLON],resolution=resolution,
limits=[center[0],center[1],dlat,dlon],
merdict=dict(labels=[False,False,True,False]),
color='k',marker='o',linestyle='none',
markeredgecolor='none',markersize=1,zorder=10)
slon=float(quake.qlon);slat=float(quake.qlat);
x,y=ms(slon,slat)
ms.plot(x,y,'wo',markersize=20,zorder=50000)
# ############################################################
# DECORATION
# ############################################################
axs[0].text(0.95,0.05,"N = %d\nlat,lon = %.2f, %.2f\n$\Delta$(lat,lon) = %.2f, %.2f"%(nquakes,
center[0],
center[1],
dlat,dlon),
horizontalalignment="right",
verticalalignment="bottom",
zorder=50,bbox=dict(fc='w',pad=20),
transform=axs[0].transAxes)
# ############################################################
# SAVING FIGURE
# ############################################################
saveFigure(confile,fig)
| gpl-2.0 |
NicovincX2/Python-3.5 | Probabilités/Théorème de la théorie des probabilités/central_limit_theorem.py | 1 | 1822 | # -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
# provides capability to define function with partial arguments
from functools import partial
N = 1000000 # number of times n samples are taken. Try varying this number.
nobb = 101 # number of bin boundaries on plots
n = np.array([1, 2, 3, 5, 10, 100]) # number of samples to average over
exp_mean = 3 # mean of exponential distribution
a, b = 0.7, 0.5 # parameters of beta distribution
dist = [partial(np.random.random), partial(
np.random.exponential, exp_mean), partial(np.random.beta, a, b)]
title_names = ["Flat", "Exponential (mean=%.1f)" %
exp_mean, "Beta (a=%.1f, b=%.1f)" % (a, b)]
drange = np.array([[0, 1], [0, 10], [0, 1]]) # ranges of distributions
means = np.array([0.5, exp_mean, a / (a + b)]) # means of distributions
# variances of distributions
var = np.array([1 / 12, exp_mean**2, a * b / ((a + b + 1) * (a + b)**2)])
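# By the CLT, the mean of n iid samples is approximately normal with the same
# mean and variance var/n; that limiting density is what gets overlaid in red
# on the histograms below.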
binrange = np.array([np.linspace(p, q, nobb) for p, q in drange])
ln, ld = len(n), len(dist)
plt.figure(figsize=((ld * 4) + 1, (ln * 2) + 1))
for i in xrange(ln): # loop over number of n samples to average over
for j in xrange(ld): # loop over the different distributions
plt.subplot(ln, ld, i * ld + 1 + j)
plt.hist(np.mean(dist[j]((N, n[i])), 1), binrange[j], normed=True)
plt.xlim(drange[j])
if j == 0:
plt.ylabel('n=%i' % n[i], fontsize=15)
if i == 0:
plt.title(title_names[j], fontsize=15)
else:
            clt = (1 / (np.sqrt(2 * np.pi * var[j] / n[i]))) * np.exp(-(
                ((binrange[j] - means[j])**2) * n[i] / (2 * var[j])))
plt.plot(binrange[j], clt, 'r', linewidth=2)
plt.show()
os.system("pause")
| gpl-3.0 |
frodo81/EnergyMonitor | TestFiles/LibTest.py | 1 | 6355 | '''
Created on 01.11.2013
@author: bernd
'''
import unittest
from emLibrary.InfoFile import InfoFile
from emLibrary.DataFile import DataFile
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
class Test(unittest.TestCase):
# Location of INFO-File
filename = "a01307fe.bin"
datafile = "a0130800.bin"
# datafile = "a0130884.bin"
# datafile = "a013081f.bin"
# datafile = "a013085c.bin"
def testConstructor(self):
print "Starting testConstructor ..."
testobject = InfoFile(self.filename)
print " " + testobject.infoHex
        print "Finished testConstructor successfully!\n"
def testValue(self):
print "Starting testValue ..."
print " Test with 2 params"
testobject = InfoFile(self.filename)
print " %s" % (testobject.getdec(5, 7)/1000.0)
print " %s" % (testobject.getdec(8, 10)/100.0)
print " Test with 3 params"
print " %s" % (testobject.getdec(5,7,"494e464f3a000a4d00d4420065280001530000a60001020000b200000b00000a00008f0000b70000d20000c70415071c0960096009600960096009600960096004130654044504a90729067307a706b503f60420000002000000020000133305100dffffffff")/1000.0)
        print "Finished testValue successfully!\n"
def testAttributes(self):
print "Starting testAttributes ..."
testobject = InfoFile(self.filename)
print " Total power and times"
print " Total power: %s kWh" % testobject.TotalPower
print " Total recorded time: %s h" % testobject.TotalRecTime
print " Total ON time: %s h" % testobject.TotalONTime
print " Total power values"
print " as List: "
print " " + str(testobject.Totalkwh)
print " as value: "
print " Total kwh (today): %s kwh" % testobject.Totalkwh[0]
print " Total kwh (yesterday): %s kwh" % testobject.Totalkwh[1]
print " Total kwh (2 days ago): %s kwh" % testobject.Totalkwh[2]
print " Total kwh (3 days ago): %s kwh" % testobject.Totalkwh[3]
print " Total kwh (4 days ago): %s kwh" % testobject.Totalkwh[4]
print " Total kwh (5 days ago): %s kwh" % testobject.Totalkwh[5]
print " Total kwh (6 days ago): %s kwh" % testobject.Totalkwh[6]
print " Total kwh (7 days ago): %s kwh" % testobject.Totalkwh[7]
print " Total kwh (8 days ago): %s kwh" % testobject.Totalkwh[8]
print " Total kwh (9 days ago): %s kwh" % testobject.Totalkwh[9]
print " Total recorded times values"
print " as List: "
print " " + str(testobject.TotalRecTimeList)
print " as value: "
print " Total recorded time (today): %s h" % testobject.TotalRecTimeList[0]
print " Total recorded time (yesterday): %s h" % testobject.TotalRecTimeList[1]
print " Total recorded time (2 days ago): %s h" % testobject.TotalRecTimeList[2]
print " Total recorded time (3 days ago): %s h" % testobject.TotalRecTimeList[3]
print " Total recorded time (4 days ago): %s h" % testobject.TotalRecTimeList[4]
print " Total recorded time (5 days ago): %s h" % testobject.TotalRecTimeList[5]
print " Total recorded time (6 days ago): %s h" % testobject.TotalRecTimeList[6]
print " Total recorded time (7 days ago): %s h" % testobject.TotalRecTimeList[7]
print " Total recorded time (8 days ago): %s h" % testobject.TotalRecTimeList[8]
print " Total recorded time (9 days ago): %s h" % testobject.TotalRecTimeList[9]
print " Total ON times values"
print " as List: "
print " " + str(testobject.TotalONTimeList)
print " as value: "
print " Total ON time (today): %s h" % testobject.TotalONTimeList[0]
print " Total ON time (yesterday): %s h" % testobject.TotalONTimeList[1]
print " Total ON time (2 days ago): %s h" % testobject.TotalONTimeList[2]
print " Total ON time (3 days ago): %s h" % testobject.TotalONTimeList[3]
print " Total ON time (4 days ago): %s h" % testobject.TotalONTimeList[4]
print " Total ON time (5 days ago): %s h" % testobject.TotalONTimeList[5]
print " Total ON time (6 days ago): %s h" % testobject.TotalONTimeList[6]
print " Total ON time (7 days ago): %s h" % testobject.TotalONTimeList[7]
print " Total ON time (8 days ago): %s h" % testobject.TotalONTimeList[8]
print " Total ON time (9 days ago): %s h" % testobject.TotalONTimeList[9]
print " ID Number"
print " ID number: %s" % testobject.ID
print " Tarif information"
print " Tarif 1: %s Euro" % testobject.Tarif1
print " Tarif 2: %s Euro" % testobject.Tarif2
print " Date and time information"
print " Initial Date and time: %s " % testobject.InitialDate
        print "Finished testAttributes successfully!\n"
def testDataConstructor(self):
print "Starting testDataConstructor ..."
testobject = DataFile(self.datafile)
print testobject.dataHexList
print testobject.StartDateList
# for key in sorted(testobject.DataDic.iterkeys()):
# print "%s : %s" % (key,testobject.DataDic[key])
for key in sorted(testobject.DataDic.iterkeys()):
print "%s;%s;%s;%s" % (key,testobject.DataDic[key][0],testobject.DataDic[key][1],testobject.DataDic[key][2])
# print testobject.DataDic
# print testobject.DataDic[datetime(2013, 5, 17, 17, 8)]
# print testobject.DataDic[datetime(2013, 5, 17, 17, 9)]
# print testobject.DataDic[datetime(2013, 5, 17, 17, 10)]
        print "Finished testConstructor successfully!\n"
def testPandasDataFile(self):
testobject = DataFile(self.datafile)
pdData = pd.DataFrame(testobject.DataDic.values(), index=testobject.DataDic.keys())
pdData.plot()
plt.show()
        print "Finished testAttributes successfully!\n"
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testConstructor']
unittest.main()
| gpl-2.0 |
amolkahat/pandas | pandas/tests/api/test_types.py | 2 | 2622 | # -*- coding: utf-8 -*-
import sys
import pytest
from pandas.api import types
from pandas.util import testing as tm
from .test_api import Base
class TestTypes(Base):
allowed = ['is_bool', 'is_bool_dtype',
'is_categorical', 'is_categorical_dtype', 'is_complex',
'is_complex_dtype', 'is_datetime64_any_dtype',
'is_datetime64_dtype', 'is_datetime64_ns_dtype',
'is_datetime64tz_dtype', 'is_datetimetz', 'is_dtype_equal',
'is_extension_type', 'is_float', 'is_float_dtype',
'is_int64_dtype', 'is_integer',
'is_integer_dtype', 'is_number', 'is_numeric_dtype',
'is_object_dtype', 'is_scalar', 'is_sparse',
'is_string_dtype', 'is_signed_integer_dtype',
'is_timedelta64_dtype', 'is_timedelta64_ns_dtype',
'is_unsigned_integer_dtype', 'is_period',
'is_period_dtype', 'is_interval', 'is_interval_dtype',
'is_re', 'is_re_compilable',
'is_dict_like', 'is_iterator', 'is_file_like',
'is_list_like', 'is_hashable', 'is_array_like',
'is_named_tuple',
'pandas_dtype', 'union_categoricals', 'infer_dtype']
deprecated = ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']
dtypes = ['CategoricalDtype', 'DatetimeTZDtype',
'PeriodDtype', 'IntervalDtype']
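    # `check` comes from the shared Base helper in test_api and verifies that
    # the public names exposed by pandas.api.types match the lists above.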
def test_types(self):
self.check(types, self.allowed + self.dtypes + self.deprecated)
def check_deprecation(self, fold, fnew):
with tm.assert_produces_warning(DeprecationWarning):
try:
result = fold('foo')
expected = fnew('foo')
assert result == expected
except TypeError:
pytest.raises(TypeError, lambda: fnew('foo'))
except AttributeError:
pytest.raises(AttributeError, lambda: fnew('foo'))
def test_deprecated_from_api_types(self):
for t in self.deprecated:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(types, t)(1)
def test_moved_infer_dtype():
# del from sys.modules to ensure we try to freshly load.
# if this was imported from another test previously, we would
# not see the warning, since the import is otherwise cached.
sys.modules.pop("pandas.lib", None)
with tm.assert_produces_warning(FutureWarning):
import pandas.lib
e = pandas.lib.infer_dtype('foo')
assert e is not None
| bsd-3-clause |
Alwnikrotikz/freetype-py | examples/example_1.py | 2 | 2975 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
#
# Direct translation of example 1 from the freetype tutorial:
# http://www.freetype.org/freetype2/docs/tutorial/step1.html
#
import math
from freetype import *
if __name__ == '__main__':
import Image
from freetype import *
WIDTH, HEIGHT = 640, 480
image = Image.new('L', (WIDTH,HEIGHT))
def draw_bitmap( bitmap, x, y):
x_max = x + bitmap.width
y_max = y + bitmap.rows
p = 0
for p,i in enumerate(range(x,x_max)):
for q,j in enumerate(range(y,y_max)):
if i < 0 or j < 0 or i >= WIDTH or j >= HEIGHT:
continue;
pixel = image.getpixel((i,j))
pixel |= int(bitmap.buffer[q * bitmap.width + p]);
image.putpixel((i,j), pixel)
library = FT_Library()
matrix = FT_Matrix()
face = FT_Face()
pen = FT_Vector()
filename= './Vera.ttf'
text = 'Hello World !'
num_chars = len(text)
angle = ( 25.0 / 360 ) * 3.14159 * 2
# initialize library, error handling omitted
error = FT_Init_FreeType( byref(library) )
# create face object, error handling omitted
error = FT_New_Face( library, filename, 0, byref(face) )
# set character size: 50pt at 100dpi, error handling omitted
error = FT_Set_Char_Size( face, 50 * 64, 0, 100, 0 )
slot = face.contents.glyph
# set up matrix
matrix.xx = (int)( math.cos( angle ) * 0x10000L )
matrix.xy = (int)(-math.sin( angle ) * 0x10000L )
matrix.yx = (int)( math.sin( angle ) * 0x10000L )
matrix.yy = (int)( math.cos( angle ) * 0x10000L )
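    # FT_Matrix coefficients are 16.16 fixed-point numbers, hence the
    # 0x10000 (== 2**16) scaling of the rotation terms above.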
    # the pen position in 26.6 cartesian space coordinates;
    # start at (200,300) relative to the upper left corner
pen.x = 200 * 64;
pen.y = ( HEIGHT - 300 ) * 64
for n in range(num_chars):
# set transformation
FT_Set_Transform( face, byref(matrix), byref(pen) )
# load glyph image into the slot (erase previous one)
charcode = ord(text[n])
index = FT_Get_Char_Index( face, charcode )
FT_Load_Glyph( face, index, FT_LOAD_RENDER )
# now, draw to our target surface (convert position)
draw_bitmap( slot.contents.bitmap,
slot.contents.bitmap_left,
HEIGHT - slot.contents.bitmap_top )
# increment pen position
pen.x += slot.contents.advance.x
pen.y += slot.contents.advance.y
FT_Done_Face(face)
FT_Done_FreeType(library)
import matplotlib.pyplot as plt
plt.imshow(image, origin='lower',
interpolation='nearest', cmap=plt.cm.gray)
plt.show()
| bsd-3-clause |
alexnowakvila/DCN | code/Sorting/Logger.py | 1 | 3298 | import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.spatial import ConvexHull
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
class Logger(object):
def __init__(self, path):
directory = os.path.join(path, 'plots/')
self.path = directory
# Create directory if necessary
try:
os.stat(directory)
except:
os.mkdir(directory)
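    # Rough usage sketch (illustrative): logger = Logger(save_path);
    # logger.write_settings(args); then call the plot_* / save_* methods
    # periodically during training.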
def write_settings(self, args):
# write info
path = self.path + 'settings.txt'
with open(path, 'w') as file:
for arg in vars(args):
file.write(str(arg) + ' : ' + str(getattr(args, arg)) + '\n')
def plot_losses(self, losses, losses_reg, scales=[], fig=0):
# discriminative losses
plt.figure(fig)
plt.clf()
plt.semilogy(range(0, len(losses)), losses, 'b')
plt.xlabel('iterations')
plt.ylabel('Loss')
plt.title('discriminative loss')
path = os.path.join(self.path, 'losses.png')
plt.savefig(path)
# reg loss
plt.figure(fig + 1)
plt.clf()
plt.semilogy(range(0, len(losses_reg)), losses_reg, 'b')
plt.xlabel('iterations')
plt.ylabel('Loss')
plt.title('split regularization loss')
path = os.path.join(self.path, 'split_variances.png')
plt.savefig(path)
def plot_accuracies(self, accuracies, scales=[], mode='train', fig=0):
plt.figure(fig)
plt.clf()
colors = cm.rainbow(np.linspace(0, 1, len(scales)))
l = []
names = [str(sc) for sc in scales]
for i, acc in enumerate(accuracies):
ll, = plt.plot(range(len(acc)), acc, color=colors[i])
l.append(ll)
plt.ylabel('accuracy')
plt.legend(l, names, loc=2, prop={'size': 6})
if mode == 'train':
plt.xlabel('iterations')
else:
plt.xlabel('iterations x 1000')
path = os.path.join(self.path, 'accuracies_{}.png'.format(mode))
plt.savefig(path)
def plot_Phis_sparsity(self, Phis, fig=0):
Phis = [phis[0].data.cpu().numpy() for phis in Phis]
plt.figure(fig)
plt.clf()
for i, phi in enumerate(Phis):
plt.subplot(1, len(Phis), i + 1)
# plot first element of the batch
plt.spy(phi, precision=0.001, marker='o', markersize=2)
plt.xticks([])
plt.yticks([])
plt.title('k={}'.format(i))
path = os.path.join(self.path, 'Phis.png')
plt.savefig(path)
def save_results(self, losses, accuracies_test):
np.savez(self.path + 'results.npz', Loss=losses,
Accuracies=accuracies_test)
def save_test_results(self, accuracies_test, scales):
path = self.path + 'test_results.txt'
with open(path, 'w') as file:
file.write('--------------TEST RESULTS-------------- \n')
for i, accs in enumerate(accuracies_test):
result_acc = ('Accuracy for {} scales: {} \n'
.format(scales[i], accs))
file.write(result_acc + '\n')
| bsd-3-clause |
ssaeger/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
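# X and Y share the latent variables l1 and l2 (each duplicated twice), plus
# independent Gaussian noise, so PLS should recover two strongly correlated
# components across the two datasets.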
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
crockettcobb/data | pew-religions/Religion-Leah.py | 37 | 3271 | #!/usr/bin/env python
import numpy as np
import pandas as pd
religions = ['Buddhist', 'Catholic', 'Evangel Prot', 'Hindu', 'Hist Black Prot', 'Jehovahs Witness', 'Jewish', 'Mainline Prot', 'Mormon', 'Muslim', 'Orthodox Christian', 'Unaffiliated']
csv = open("current.csv", 'w')
csv.truncate()
def write_row(matrix):
arr = np.asarray(matrix[0])[0]
row = ','.join([str(a) for a in arr]) + '\n'
csv.write(row)
# Intitial distribution of religions in US
first = np.matrix([.007, .208, .254, .007, .065, .008, .019, .147, .016, .009, .005, .228])
# Normed to sum to 100%
current = first / np.sum(first)
t0 = current
write_row(current)
# Transition matrix
trans = np.matrix(((0.390296314, 0.027141947, 0.06791021, 0.001857564, 0, 0, 0.011166082, 0.059762879, 0, 0, 0, 0.396569533),
(0.005370791, 0.593173325, 0.103151608, 0.000649759, 0.010486747, 0.005563864, 0.002041424, 0.053825329, 0.004760476, 0.001130529, 0.000884429, 0.199488989),
(0.00371836, 0.023900817, 0.650773331, 0.000250102, 0.016774503, 0.003098214, 0.001865491, 0.122807467, 0.004203107, 0.000186572, 0.002123778, 0.151866648),
(0, 0, 0.0033732, 0.804072618, 0, 0.001511151, 0, 0.01234639, 0, 0.00209748, 0, 0.17659916),
(0.002051357, 0.016851659, 0.09549708, 0, 0.699214315, 0.010620473, 0.000338804, 0.024372871, 0.000637016, 0.009406884, 0.000116843, 0.129892558),
(0, 0.023278276, 0.109573979, 0, 0.077957568, 0.336280578, 0, 0.074844833, 0.007624035, 0, 0, 0.35110361),
(0.006783201, 0.004082693, 0.014329604, 0, 0, 0.000610585, 0.745731278, 0.009587587, 0, 0, 0.002512334, 0.184058682),
(0.005770357, 0.038017215, 0.187857555, 0.000467601, 0.008144075, 0.004763516, 0.003601208, 0.451798506, 0.005753587, 0.000965543, 0.00109818, 0.25750798),
(0.007263135, 0.01684885, 0.06319935, 0.000248467, 0.0059394, 0, 0.001649896, 0.03464334, 0.642777489, 0.002606278, 0, 0.208904711),
(0, 0.005890381, 0.023573308, 0, 0.011510643, 0, 0.005518343, 0.014032084, 0, 0.772783807, 0, 0.15424369),
(0.004580353, 0.042045841, 0.089264134 , 0, 0.00527346, 0, 0, 0.061471387, 0.005979218, 0.009113978, 0.526728084, 0.243246723),
(0.006438308, 0.044866331, 0.1928814, 0.002035375, 0.04295005, 0.010833621, 0.011541439, 0.09457963, 0.01365141, 0.005884336, 0.002892072, 0.525359211)))
# Fertility array
fert = np.matrix(((2.1, 2.3, 2.3, 2.1, 2.5, 2.1, 2, 1.9, 3.4, 2.8, 2.1, 1.7)))
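# Each fertility entry lines up positionally with the `religions` list above
# (Buddhist, Catholic, ..., Unaffiliated).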
# Create data frame for printing later
religionDataFrame = pd.DataFrame()
for x in range(0,100):
### beginning of conversion step
# apply transition matrix to current distribution
current = current * trans
### beginning of fertility step
    # divide by two to get the number of couples
current = current/2
# adjust by fertility
current = np.multiply(fert, current)
# normalize to 100%
current = current / np.sum(current)
write_row(current)
# add to data frame
religionDataFrame = religionDataFrame.append(pd.DataFrame(current), ignore_index=True)
csv.close()
religionDataFrame.columns = religions
religionDataFrame.to_csv("current_pandas_save.csv")
| mit |
aabadie/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 49 | 46769 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
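# Minkowski p-norm values exercised by the tests below (p=2 is Euclidean,
# p=np.inf is the Chebyshev metric).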
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                  np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                else:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
[0.98, 0.98], [2.01, 2.01]])
y = np.array([1, 2, 1, 1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([-1, 1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test weights=_weight_func here since the user is expected
        # to handle zero distances themselves in that function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = {}
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results[algorithm] = neigh.kneighbors(test, return_distance=True)
assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
if 'kd_tree' in results:
assert_array_almost_equal(results['brute'][0],
results['kd_tree'][0])
assert_array_almost_equal(results['brute'][1],
results['kd_tree'][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
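        # With no query given, each training point is excluded from its own
        # neighbors, so the nearest neighbor is always the other point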
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
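    # String class labels should round-trip through fit/predict unchanged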
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
    # Check picklability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
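    # With shuffle=False the three informative features are generated first,
    # so their importances are expected to dominate (checked below)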
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
    AdaBoostRegressor should work without sample_weights in the base estimator.
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| gpl-2.0 |
binghongcha08/pyQMD | GWP/2D/1.1.1/contour.py | 1 | 1753 |
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import seaborn as sns
#sns.set_context("paper",font_scale=1.5)
#sns.set_style({'font.family':'Times New Roman'})
#matplotlib.rcParams['xtick.direction'] = 'out'
#matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rcParams.update({'font.size': 20})
font = {'family' : 'Times New Roman', 'weight' : 'normal', 'size' : 18}
matplotlib.rc('font', **font)
delta = 0.02
xmin = -4.0
xmax = 4.0
ymin = -3.0
ymax = 3.0
X = np.arange(xmin, xmax, delta)
Y = np.arange(ymin, ymax, delta)
x, y = np.meshgrid(X, Y)
#Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
#Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
a = 16
#Z1 = np.exp(-a*(X-1)**2 -a*(Y+1)**2)
#Z2 = np.exp(-a*(X+1)**2 -a*(Y-1)**2)
#z = (1.0-(x-np.sqrt(2.0)*y)**2/3.0)**2/8.0 + (np.sqrt(2.0)*x-y)**2/6.0-1.0/8.0
a = 1.0
b = 4.0
c = 4.0
z = y**2*(a*y**2-b)+c*(x-y)**2/2.0+b**2/4.0/a
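# assumed form: a quartic double well in y plus a harmonic coupling in (x - y),
# shifted by b**2/(4a) so that the two minima sit at z = 0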
cmap = cm.get_cmap('ocean')
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel controls whether the labels are drawn
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
levels = [0.0, 1, 2, 4.0, 8.0,12.0,16.0,20]
CS = plt.contour(x, y, z, levels,cmap=cmap)
#sns.kdeplot(z)
#plt.contour(X, Y, Z2,cmap=cmap)
plt.clabel(CS, inline=1, fontsize=9)
#cb = plt.colorbar(CS)
#cb.set_label('')
#plt.title('Simplest default with labels')
dat = np.genfromtxt(fname='xyt.dat')
plt.plot(dat[:,0],dat[:,1],'ko',markersize=4)
plt.xlabel('x [Bohr]')
plt.ylabel('y [Bohr]')
plt.xlim(-4,4)
plt.ylim(-3.6,3)
plt.savefig('contour.pdf')
plt.show()
| gpl-3.0 |
khkaminska/scikit-learn | sklearn/datasets/samples_generator.py | 103 | 56423 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
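    Examples
    --------
    A minimal illustrative call (shapes only; the generated values depend on
    the random state):
    >>> X, y = make_classification(n_samples=10, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0)
    >>> X.shape, y.shape
    ((10, 4), (10,))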
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
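# Editor's note: illustrative sketch, not part of the original module, showing
# the dense label-indicator output of the generator above; the helper name is
# editor-invented.
def _demo_make_multilabel_classification():
    import numpy as np
    X, Y = make_multilabel_classification(n_samples=50, n_features=20,
                                          n_classes=5, n_labels=2,
                                          return_indicator='dense',
                                          random_state=0)
    # Y is a binary indicator matrix with one column per class.
    assert X.shape == (50, 20) and Y.shape == (50, 5)
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y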
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
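# Editor's note: illustrative sketch, not part of the original module.  It
# re-derives the Hastie et al. target rule from the generated features; the
# helper name is editor-invented.
def _demo_make_hastie_10_2():
    import numpy as np
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    # The target is +1 when the squared norm exceeds 9.34, otherwise -1.
    expected = np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0)
    assert np.array_equal(y, expected)
    return X, y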
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
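# Editor's note: illustrative sketch, not part of the original module.  With
# coef=True and no noise the returned targets are an exact linear function of
# the inputs; the helper name is editor-invented.
def _demo_make_regression_coef():
    import numpy as np
    X, y, coef = make_regression(n_samples=100, n_features=20, n_informative=5,
                                 noise=0.0, coef=True, random_state=0)
    assert np.allclose(y, X.dot(coef))           # exact linear model
    assert np.count_nonzero(coef) <= 5           # only informative coefficients
    return X, y, coef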
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
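# Editor's note: illustrative sketch, not part of the original module.  Without
# noise the two classes sit exactly on circles of radius 1 and ``factor``; the
# helper name is editor-invented.
def _demo_make_circles_radii():
    import numpy as np
    X, y = make_circles(n_samples=100, factor=0.5, noise=None, shuffle=False,
                        random_state=0)
    radii = np.sqrt((X ** 2).sum(axis=1))
    assert np.allclose(radii[y == 0], 1.0)   # outer circle
    assert np.allclose(radii[y == 1], 0.5)   # inner circle, scaled by factor
    return X, y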
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
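# Editor's note: illustrative sketch, not part of the original module.  It only
# checks shape and class balance of the generator above; the helper name is
# editor-invented.
def _demo_make_moons():
    import numpy as np
    X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
    assert X.shape == (100, 2)
    # Two interleaving half-circles, 50 points per class for even n_samples.
    assert np.bincount(y).tolist() == [50, 50]
    return X, y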
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
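# Editor's note: illustrative sketch, not part of the original module.  It
# passes explicit cluster centers instead of an integer count; the helper name
# is editor-invented.
def _demo_make_blobs_fixed_centers():
    import numpy as np
    centers = np.array([[0.0, 0.0], [10.0, 10.0]])
    X, y = make_blobs(n_samples=20, centers=centers, cluster_std=0.1,
                      random_state=0)
    # With a tight cluster_std every point stays close to its own center.
    assert np.all(np.linalg.norm(X - centers[y], axis=1) < 1.0)
    return X, y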
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
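# Editor's note: illustrative sketch, not part of the original module.  With
# zero noise the Friedman #1 target can be recomputed from the first five
# features alone; the helper name is editor-invented.
def _demo_make_friedman1():
    import numpy as np
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2
                + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)
    return X, y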
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
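# Editor's note: illustrative sketch, not part of the original module.  It
# checks the documented input intervals shared by the Friedman #2 and #3
# generators above; the helper name is editor-invented.
def _demo_friedman23_input_ranges():
    import numpy as np
    for maker in (make_friedman2, make_friedman3):
        X, _ = maker(n_samples=200, random_state=0)
        assert X.shape == (200, 4)
        assert 0 <= X[:, 0].min() and X[:, 0].max() <= 100
        assert 40 * np.pi <= X[:, 1].min() and X[:, 1].max() <= 560 * np.pi
        assert 1 <= X[:, 3].min() and X[:, 3].max() <= 11
    return X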
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
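# Editor's note: illustrative sketch, not part of the original module.  With
# tail_strength=0 almost the whole singular spectrum is concentrated in the
# first few components, as set by ``effective_rank``; the helper name is
# editor-invented.
def _demo_make_low_rank_matrix():
    import numpy as np
    X = make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                             tail_strength=0.0, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    # The bell-shaped profile puts well over 95% of the spectrum in the
    # leading 20 singular values.
    assert s[:20].sum() / s.sum() > 0.95
    return X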
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
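# Editor's note: illustrative sketch, not part of the original module.  It
# follows the return convention of the function above, Y of shape
# (n_features, n_samples), D of shape (n_features, n_components) and X of
# shape (n_components, n_samples); the helper name is editor-invented.
def _demo_make_sparse_coded_signal():
    import numpy as np
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=15,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.allclose(Y, np.dot(D, X))              # exact reconstruction
    assert (X != 0).sum(axis=0).tolist() == [3] * 5  # sparsity per column
    return Y, D, X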
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
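# Editor's note: illustrative sketch, not part of the original module.  It
# verifies the two defining properties of the matrix built above; the helper
# name is editor-invented.
def _demo_make_spd_matrix():
    import numpy as np
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(X, X.T)                 # symmetric
    assert np.all(np.linalg.eigvalsh(X) > 0)   # positive-definite
    return X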
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
    # Permute the rows: we don't want to have asymmetries in the final
    # SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
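# Editor's note: illustrative sketch, not part of the original module.  Because
# classes are quantiles of the distance from the mean, they come out with equal
# sizes when n_samples is divisible by n_classes; the helper name is
# editor-invented.
def _demo_make_gaussian_quantiles():
    import numpy as np
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert np.bincount(y).tolist() == [30, 30, 30]
    return X, y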
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
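# Editor's note: illustrative sketch, not part of the original module.  Without
# shuffling or noise each bicluster is a constant block and the row/column
# indicators mark it exactly; the helper name is editor-invented.
def _demo_make_biclusters():
    import numpy as np
    X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, noise=0.0,
                                    shuffle=False, random_state=0)
    assert X.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    first_block = X[rows[0]][:, cols[0]]
    assert np.allclose(first_block, first_block[0, 0])
    return X, rows, cols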
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
jorik041/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 103 | 41083 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
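# Editor's note: illustrative sketch, not part of the original test module.  It
# shows the behaviour the helper above is relied on for: a zero distance yields
# an infinite weight, with the divide-by-zero warning suppressed.  It uses the
# module-level numpy import; the name below is editor-invented.
def _demo_weight_func_zero_distance():
    dist = np.array([[0.0, 1.0, 2.0]])
    weights = _weight_func(dist)
    assert np.isinf(weights[0, 0])                   # zero distance -> inf weight
    assert np.allclose(weights[0, 1:], [1.0, 0.25])  # otherwise 1 / d ** 2
    return weights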
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
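# Editor's note: illustrative sketch, not part of the original test module.
# With uniform weights predict_proba is simply the fraction of the k nearest
# neighbours in each class, which can be checked by hand.  It uses the
# module-level imports; the helper name is editor-invented.
def _demo_predict_proba_by_hand():
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = neighbors.KNeighborsClassifier(n_neighbors=3).fit(X, y)
    proba = clf.predict_proba([[1.4]])
    # The three nearest neighbours of 1.4 are 1.0, 2.0 and 0.0: two samples
    # from class 0 and one from class 1.
    assert np.allclose(proba, [[2.0 / 3.0, 1.0 / 3.0]])
    return proba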
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since the user is expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test radius-based classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius-based neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit nearest-neighbor classifiers and a regressor with each algorithm and
    # check that they score highly on the training data.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
    # Test radius_neighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
    # Raise error when wrong parameters are supplied.
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
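    # Neighborhoods returned by radius_neighbors are ragged (each query point
    # can have a different number of neighbors), so they come back as object
    # arrays and have to be compared row by row.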
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
    assert_array_equal(result, y)
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/metrics/tests/test_ranking.py | 11 | 37239
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
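# Small illustrative sketch, not part of the upstream suite (the helper name
# below is chosen here for the demo only): make_prediction returns the held-out
# labels, the hard predictions and, in the binary case, one positive-class
# probability per held-out sample.
def _demo_make_prediction_shapes():
    y_true, y_pred, probas_pred = make_prediction(binary=True)
    assert y_true.shape == y_pred.shape
    assert probas_pred.shape == y_true.shape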
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
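# Small illustrative sketch, not part of the upstream suite (the helper name
# below is chosen here for the demo only): _auc is the pairwise-ranking
# (Wilcoxon-Mann-Whitney) statistic, so on a perfectly separated toy problem
# every (positive, negative) pair is ordered correctly and the score is 1.
def _demo_auc_pairwise_ranking():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.2, 0.8, 0.9])
    assert_almost_equal(_auc(y_true, y_score), 1.)
    assert_almost_equal(roc_auc_score(y_true, y_score), 1.)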
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
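# Small illustrative sketch, not part of the upstream suite (the helper name
# below is chosen here for the demo only): average precision is the mean of the
# precisions at each relevant item.  With labels [1, 0, 1] scored in decreasing
# order, the relevant items sit at ranks 1 and 3, giving (1/1 + 2/3) / 2.
def _demo_average_precision_by_hand():
    y_true = np.array([1, 0, 1])
    y_score = np.array([0.9, 0.8, 0.7])
    expected = (1.0 / 1 + 2.0 / 3) / 2
    assert_almost_equal(_average_precision(y_true, y_score), expected)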
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
"""Test whether the returned threshold matches up with tpr"""
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
"""Test to ensure that we don't return spurious repeating thresholds.
Duplicated thresholds can arise due to machine precision issues.
"""
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
"""roc_curve not applicable for multi-class problems"""
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
"""roc_curve for confidence scores"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
"""roc_curve for hard decisions"""
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
"""Test Area Under Curve (AUC) computation"""
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
"""Test that roc_auc_score function returns an error when trying
to compute AUC for non-binary class values.
"""
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
"""Test Precision-Recall and aread under PR curve"""
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
"""Check on several small example that it works """
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
"""Check tie handling in score"""
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
""" Check that Label ranking average precision works for various"""
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1; ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account: tied labels all
        # get the worst rank of the tie, e.g. scores [0.5, 0.5, 0.2] give
        # corrected ranks [2, 2, 3].
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the relevant labels ranked at or above this label
            # (smaller or equal corrected rank, including the label itself).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
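# Small illustrative sketch, not part of the upstream suite (the helper name
# below is chosen here for the demo only): with a single sample whose relevant
# labels sit at ranks 1 and 3 of the score ordering, LRAP is (1/1 + 2/3) / 2,
# and the reference implementation above agrees with the library function.
def _demo_my_lrap_by_hand():
    y_true = np.array([[1, 0, 1]])
    y_score = np.array([[0.75, 0.5, 0.25]])
    expected = (1.0 / 1 + 2.0 / 3) / 2
    assert_almost_equal(_my_lrap(y_true, y_score), expected)
    assert_almost_equal(label_ranking_average_precision_score(y_true, y_score),
                        expected)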
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
| bsd-3-clause |
cbertinato/pandas | pandas/tests/arrays/categorical/test_constructors.py | 1 | 23771
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DatetimeIndex, Index, Interval,
IntervalIndex, NaT, Series, Timestamp, date_range, period_range,
timedelta_range)
import pandas.util.testing as tm
class TestCategoricalConstructors:
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with pytest.raises(exp_err, match=exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with pytest.raises(exp_err, match=exp_msg):
Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
ordered=ordered)
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = pd.Int64Index([1, 2, 3])
tm.assert_index_equal(c.categories, expected)
def test_constructor_empty_boolean(self):
# see gh-22702
cat = pd.Categorical([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
('a', 'b')], dtype=object)[:-1]
result = Categorical(values)
expected = Index([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
msg = ("'values' is not ordered, please explicitly specify the "
"categories order by passing in a categories argument.")
with pytest.raises(TypeError, match=msg):
Categorical(arr, ordered=True)
def test_constructor_interval(self):
result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
ordered=True)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
msg = "Categorical categories must be unique"
with pytest.raises(ValueError, match=msg):
Categorical([1, 2], [1, 2, 2])
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ["a", "b", "b"])
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]),
categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2., 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1., 2., 3.])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next one is from the old docs
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_existing_categories(self):
# GH25318: constructing with pd.Series used to bogusly skip recoding
# categories
c0 = Categorical(["a", "b", "c", "a"])
c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
c2 = Categorical(c0, categories=c1.categories)
tm.assert_categorical_equal(c1, c2)
c3 = Categorical(Series(c0), categories=c1.categories)
tm.assert_categorical_equal(c1, c3)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
with pytest.raises(TypeError, match=msg):
Categorical(['a', 'b'], categories='a')
def test_constructor_with_null(self):
# Cannot have NaN in categories
msg = "Categorial categories cannot be null"
with pytest.raises(ValueError, match=msg):
Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical([None, "a", "b", "c"],
categories=[None, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical(DatetimeIndex(['nat', '20160101']),
categories=[NaT, Timestamp('20160101')])
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values,
Categorical(ci.astype(object),
categories=ci.categories))
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(xrange(3))
tm.assert_categorical_equal(cat, exp)
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=xrange(3))
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize("dtl", [
date_range("1995-01-01 00:00:00", periods=5, freq="s"),
date_range("1995-01-01 00:00:00", periods=5,
freq="s", tz="US/Eastern"),
timedelta_range("1 day", periods=5, freq="s")
])
def test_constructor_with_datetimelike(self, dtl):
# see gh-12077
# constructor with a datetimelike and NaT
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert "NaT" in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range('1 days', freq='D', periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range('2015-01-01', freq='D', periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1., 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype='int64'),
['a', 'b', 'c', np.nan],
[pd.Period('2014-01'), pd.Period('2014-02'), NaT],
[Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],
[Timestamp('2014-01-01', tz='US/Eastern'),
Timestamp('2014-01-02', tz='US/Eastern'), NaT],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ['b', 'a', 'c']
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)
expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(['a', 'b'], ordered=True)
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(['a', 'b'], ordered=True, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(['a', 'b'], ordered=False, dtype=dtype)
@pytest.mark.parametrize('categories', [
None, ['a', 'b'], ['a', 'c'],
])
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(['a', 'b'], categories=categories,
ordered=ordered, dtype='category')
expected = Categorical(['a', 'b'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(['a', 'b', 'd'])
# use categories, ordered
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
dtype='category')
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
result = Categorical(
['a', 'b'], categories=Categorical(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
result = Categorical(
['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
def test_from_codes(self):
# too few categories
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be between "
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], dtype=dtype)
# no int codes
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], dtype=dtype)
# no unique categories
with pytest.raises(ValueError,
match="Categorical categories must be unique"):
Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"])
# NaN categories included
with pytest.raises(ValueError,
match="Categorial categories cannot be null"):
Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan])
# too negative
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = r"codes need to be between -1 and len\(categories\)-1"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], dtype=dtype)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], categories=dtype.categories)
tm.assert_categorical_equal(exp, res)
res = Categorical.from_codes([0, 1, 2], dtype=dtype)
tm.assert_categorical_equal(exp, res)
def test_from_codes_with_categorical_categories(self):
# GH17884
expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
result = Categorical.from_codes(
[0, 1], categories=Categorical(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
result = Categorical.from_codes(
[0, 1], categories=CategoricalIndex(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
# non-unique Categorical still raises
with pytest.raises(ValueError,
match="Categorical categories must be unique"):
Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))
def test_from_codes_with_nan_code(self):
# GH21767
codes = [1, 2, np.nan]
dtype = CategoricalDtype(categories=['a', 'b', 'c'])
with pytest.raises(ValueError,
match="codes need to be array-like integers"):
Categorical.from_codes(codes, categories=dtype.categories)
with pytest.raises(ValueError,
match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_float(self):
# GH21767
codes = [1.0, 2.0, 0] # integer, but in float dtype
dtype = CategoricalDtype(categories=['a', 'b', 'c'])
with tm.assert_produces_warning(FutureWarning):
cat = Categorical.from_codes(codes, dtype.categories)
tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype='i1'))
with tm.assert_produces_warning(FutureWarning):
cat = Categorical.from_codes(codes, dtype=dtype)
tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype='i1'))
codes = [1.1, 2.0, 0] # non-integer
with pytest.raises(ValueError,
match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype.categories)
with pytest.raises(ValueError,
match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_dtype_raises(self):
msg = 'Cannot specify'
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1], categories=['a', 'b'],
dtype=CategoricalDtype(['a', 'b']))
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1], ordered=True,
dtype=CategoricalDtype(['a', 'b']))
def test_from_codes_neither(self):
msg = "Both were None"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1])
@pytest.mark.parametrize('dtype', [None, 'category'])
def test_from_inferred_categories(self, dtype):
cats = ['a', 'b']
codes = np.array([0, 0, 1, 1], dtype='i8')
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'category'])
def test_from_inferred_categories_sorts(self, dtype):
cats = ['b', 'a']
codes = np.array([0, 1, 1, 1], dtype='i8')
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ['a', 'b', 'd']
codes = np.array([0, 1, 0, 2], dtype='i8')
dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(['a', 'b', 'a', 'd'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ['1', '2', 'bad']
codes = np.array([0, 0, 1, 2], dtype='i8')
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('ordered', [None, True, False])
def test_construction_with_ordered(self, ordered):
# GH 9347, 9190
cat = Categorical([0, 1, 2], ordered=ordered)
assert cat.ordered == bool(ordered)
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
| bsd-3-clause |
lukas/scikit-class | examples/by-hand/utils/models.py | 2 | 5070 | import ipywidgets
from ipywidgets import interact
import matplotlib.pyplot as plt
import numpy as np
import wandb
class Model(object):
"""Base class for the other *Model classes.
Implements plotting and interactive components
    and interfaces with the Parameters object."""
def __init__(self, input_values, model_inputs, parameters, funk):
self.model_inputs = np.atleast_2d(model_inputs)
self.input_values = input_values
self.parameters = parameters
self.funk = funk
self.plotted = False
self.has_data = False
self.show_MSE = False
def plot(self):
if not self.plotted:
self.initialize_plot()
else:
self.artist.set_data(self.input_values, self.outputs)
return
@property
def outputs(self):
return np.squeeze(self.funk(self.model_inputs))
def initialize_plot(self):
self.fig = plt.figure()
self.ax = plt.subplot(111)
self.artist, = self.ax.plot(
self.input_values, self.outputs, linewidth=4,
color=np.divide([255, 204, 51], 256))
self.plotted = True
self.ax.set_ylim([0, 1])
self.ax.set_xlim([0, 1])
def make_interactive(self, log=True):
"""called in a cell after Model.plot()
to make the plot interactive."""
self.log = log
if self.log:
wandb.init(entity="wandb", project="by-hand", config={"model": "linear"})
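        # one slider per parameter: each change writes the new values into
        # Parameters.dict, refreshes the curve, and (optionally) logs to wandb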
@interact(**self.parameters.widgets)
def make(**kwargs):
for parameter in kwargs.keys():
self.parameters.dict[parameter] = kwargs[parameter]
self.parameters.update()
self.plot()
if self.log:
wandb.log(kwargs)
if self.show_MSE:
MSE = self.compute_MSE()
print("loss:\t"+str(MSE))
if self.log:
wandb.log({"train_loss": MSE})
return
return
def set_data(self, xs, ys):
self.data_inputs = self.transform_inputs(xs)
self.correct_outputs = ys
if self.has_data:
_offsets = np.asarray([xs, ys]).T
self.data_scatter.set_offsets(_offsets)
else:
self.data_scatter = self.ax.scatter(xs, ys,
color='k', alpha=0.5, s=72)
self.has_data = True
def compute_MSE(self):
"""Used in fitting models lab to display MSE performance
for hand-fitting exercises"""
outputs = np.squeeze(self.funk(self.data_inputs))
squared_errors = np.square(self.correct_outputs - outputs)
MSE = np.mean(squared_errors)
return MSE
class LinearModel(Model):
"""A linear model is a model whose transform is
the dot product of its parameters with its inputs.
Technically really an affine model, as LinearModel.transform_inputs
adds a bias term."""
def __init__(self, input_values, parameters, model_inputs=None):
if model_inputs is None:
model_inputs = self.transform_inputs(input_values)
else:
model_inputs = model_inputs
def funk(inputs):
return np.dot(self.parameters.values, inputs)
Model.__init__(self, input_values, model_inputs, parameters, funk)
def transform_inputs(self, input_values):
model_inputs = [[1]*input_values.shape[0], input_values]
return model_inputs
class Parameters(object):
"""Tracks and updates parameter values and metadata, like range and identity,
for parameters of a model. Interfaces with widget-making tools
via the Model class to make interactive widgets for Model plots."""
def __init__(self, defaults, ranges, names=None):
assert len(defaults) == len(ranges),\
"must have default and range for each parameter"
self.values = np.atleast_2d(defaults)
self.num = len(defaults)
self._zip = zip(defaults, ranges)
if names is None:
self.names = ['parameter_' + str(idx) for idx in range(self.num)]
else:
self.names = names
        self.dict = dict(zip(self.names, defaults))
self.defaults = defaults
self.ranges = ranges
self.make_widgets()
def make_widgets(self):
self._widgets = [self.make_widget(parameter, idx)
for idx, parameter
in enumerate(self._zip)]
self.widgets = {self.names[idx]: _widget
for idx, _widget
in enumerate(self._widgets)}
def make_widget(self, parameter, idx):
default = parameter[0]
range = parameter[1]
name = self.names[idx]
return ipywidgets.FloatSlider(
value=default, min=range[0], max=range[1],
step=0.01, description=name)
def update(self):
sorted_keys = sorted(self.dict.keys())
self.values = np.atleast_2d([self.dict[key] for key in sorted_keys])
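# Minimal usage sketch (assumed workflow; the variable names and values below
# are illustrative only, and the sliders require a notebook front-end):
#
#   xs = np.linspace(0, 1, 50)
#   params = Parameters([0.5, 0.0], [(0, 1), (-1, 1)], names=['bias', 'slope'])
#   model = LinearModel(xs, params)
#   model.plot()
#   model.set_data(xs, 0.2 + 0.5 * xs)  # optional: overlay data, enable MSE
#   model.show_MSE = True
#   model.make_interactive(log=False)   # log=True would also stream to wandb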
| gpl-2.0 |
qbilius/conv-exp | hop2008/run.py | 1 | 10927 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys, os, glob, subprocess
import cPickle as pickle
from collections import OrderedDict
import numpy as np
import scipy.io
import scipy.misc
import scipy.stats
import pandas
import seaborn as sns
import sklearn.cluster, sklearn.metrics
from psychopy_ext import stats, utils
import base
class HOP2008(base.Base):
def __init__(self, *args, **kwargs):
kwargs['skip_hmo'] = False
super(HOP2008, self).__init__(*args, **kwargs)
self.kwargs = kwargs
self.dims = OrderedDict([
('px', np.array([0,0,0,1,1,1,2,2,2])),
('shape', np.array([0,1,2,1,2,0,2,0,1]))])
self.colors = OrderedDict([('px', base.COLORS[0]),
('shape', base.COLORS[1])])
def get_images(self):
self._gen_alpha()
def mds(self):
path = os.path.join('hop2008', 'img', 'alpha', '*.*')
icons = sorted(glob.glob(path))
super(HOP2008, self).mds(icons=icons, seed=3) # to match behav
# def plot_lin(self, subplots=False):
# super(HOP2008, self).plot_lin(subplots=False)
# def corr_mod(self):
# self.dissimilarity()
# human = pickle.load(open('dis_hop2008_human.pkl', 'rb'))
# df = []
# for label, data in human.items():
# d = self.dis[self.dis.keys()[-1]]
# inds = np.triu_indices(d.shape[0], k=1)
# corr = np.corrcoef(data[inds], d[inds])[0,1]
# df.append([label, corr])
# df = pandas.DataFrame(df, columns=['layer', 'correlation'])
# sns.factorplot('layer', 'correlation', data=df,
# color='steelblue', kind='bar')
# self.show('corr_mod')
# def plot_linear_clf(self):
# xlabel = '%s layers' % self.model_name
# self.lin = self.lin.rename(columns={'layer': xlabel})
# if self.model_name == 'GoogleNet':
# g = sns.factorplot(xlabel, 'accuracy', 'kind', data=self.lin,
# kind='point', markers='None', legend=False, ci=0)
# g.axes.flat[0].set_xticklabels([])
# import matplotlib.lines as mlines
# colors = sns.color_palette('Set2', 8)[:len(self.dims)]
# handles = []
# for mname, color in zip(self.dims.keys(), colors):
# patch = mlines.Line2D([], [], color=color, label=mname)
# handles.append(patch)
# g.axes.flat[0].legend(handles=handles, loc='best')
# else:
# g = sns.factorplot(xlabel, 'accuracy', 'kind', data=self.lin,
# kind='point')
# g.axes.flat[0].axhline(1/3., ls='--', c='.2')
# self.show(pref='lin')
def avg_hop2008(self, dis, plot=True):
# dis = self.dissimilarity()
df = []
for layer, dis in dis.items():
df.extend(self._avg(dis, layer, 'px'))
df.extend(self._avg(dis, layer, 'shape'))
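        # blank out within-group cells below so that 'other' keeps only the
        # dissimilarities between stimuli that share neither px nor shape group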
other = dis.copy()
for sh in np.unique(self.dims['px']):
ss = self.dims['px'] == sh
other.T[ss].T[ss] = np.nan
for sh in np.unique(self.dims['shape']):
ss = self.dims['shape'] == sh
other.T[ss].T[ss] = np.nan
inds = range(len(other))
n = 0
for si, s1 in enumerate(inds):
for s2 in inds[si+1:]:
if not np.isnan(other[s1,s2]):
df.append([layer, 'other', n, s1, s2, other[s1,s2]])
n += 1
df = pandas.DataFrame(df, columns=['layer', 'kind', 'n', 'i', 'j', 'dissimilarity'])
if plot:
df = stats.factorize(df, order={'kind': ['px', 'shape', 'other']})
# df = df[df.feature != 'other']
# agg = stats.aggregate(df, values='dissimilarity', rows='layer',
# cols='feature', yerr='n')
agg = df.pivot_table(index='n', columns=['layer', 'kind'],
values='dissimilarity')
sns.factorplot('layer', 'dissimilarity', 'kind', data=df,
kind='bar')
self.show(pref='avg')
return df
def _avg(self, dis, layer, name):
df = []
n = 0
inds = np.arange(len(self.dims[name]))
for sh in np.unique(self.dims[name]):
sel = inds[self.dims[name]==sh]
for si, s1 in enumerate(sel):
for s2 in sel[si+1:]:
df.append([layer, name, n, s1, s2, dis[s1,s2]])
n += 1
return df
def dis_group_diff(self, plot=True):
group = self.dis_group(plot=False)
agg = group.pivot_table(index=['layer','n'], columns='kind',
values='similarity').reset_index()
agg['diff'] = agg['shape'] - agg['px']
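        # positive diff = same-shape pairs are more similar than same-px pairs,
        # i.e. the layer shows a shape preference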
if self.bootstrap:
dfs = []
for layer in agg.layer.unique():
sel = agg[agg.layer==layer]['diff']
pct = stats.bootstrap_resample(sel, ci=None, func=np.mean)
d = OrderedDict([('kind', ['diff'] * len(pct)),
('layer', [layer]*len(pct)),
('preference', sel.mean()),
('iter', range(len(pct))),
('bootstrap', pct)])
dfs.append(pandas.DataFrame.from_dict(d))
df = pandas.concat(dfs)
else:
df = agg.groupby('layer').mean().reset_index()
df = df.rename(columns={'diff':'preference'})
df['kind'] = 'diff'
df['iter'] = 0
df['bootstrap'] = np.nan
del df['px']
del df['shape']
return df
def plot_dis_group_diff(self, subplots=False):
xlabel = '%s layer' % self.model_name
self.diff = self.diff.rename(columns={'layer': xlabel})
orange = sns.color_palette('Set2', 8)[1]
self.plot_single_model(self.diff, subplots=subplots, colors=orange)
sns.plt.axhline(0, ls='--', c='.15')
sns.plt.ylim([-.2, .8])
self.show('dis_group_diff')
def dis_group(self, plot=True):
dis = self.dissimilarity()
df = self.avg_hop2008(dis, plot=False)
df = df[df.kind != 'other'][['layer', 'kind', 'n', 'dissimilarity']]
df['similarity'] = 1 - df.dissimilarity
group = df.copy()
del group['dissimilarity']
print(group)
if self.task == 'run' and plot:
self.plot_single(group, 'dis_group')
return group
def _gen_alpha(self):
path = 'hop2008/img/alpha'
if not os.path.isdir(path): os.makedirs(path)
for f in sorted(glob.glob('hop2008/img/*.tif')):
fname = os.path.basename(f)
newname = fname.split('.')[0] + '.png'
newname = os.path.join(path, newname)
# and now some ridiculousness just because ImageMagick can't make
# alpha channel for no reason
alphaname = fname.split('.')[0] + '_alpha.png'
alphaname = os.path.join(path, alphaname)
subprocess.call('convert {} -alpha set -channel RGBA '
'-fuzz 10% -fill none '
'-floodfill +0+0 rgba(100,100,100,0) '
'{}'.format(f, newname).split())
subprocess.call('convert {} -alpha set -channel RGBA '
'-fuzz 10% -fill none '
'-floodfill +0+0 rgb(100,100,100) '
'-alpha extract {}'.format(f, alphaname).split())
im = utils.load_image(newname)
alpha = utils.load_image(alphaname)
scipy.misc.imsave(newname, np.dstack([im,im,im,alpha]))
os.remove(alphaname)
def remake_hop2008(**kwargs):
data = scipy.io.loadmat('hop2008_behav.mat')
data = np.array(list(data['behavioralDiff8'][0]))
# reorder such that all recta things are last, not first
a11 = data[:,:3,:3]
a12 = data[:,:3,3:]
a21 = data[:,3:,:3]
a22 = data[:,3:,3:]
b1 = np.concatenate([a22,a21], axis=2)
b2 = np.concatenate([a12,a11], axis=2)
b = np.concatenate([b1,b2], axis=1)
pickle.dump(b, open('dis_hop2008_behav.pkl', 'wb'))
def ceil_rel(**kwargs):
data = pickle.load(open('dis_hop2008_behav.pkl', 'rb'))
inds = np.triu_indices(data.shape[1], k=1)
df = np.array([d[inds] for d in data])
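    # noise ceiling for the behavioural dissimilarities: the upper bound is the
    # mean correlation of each subject with the z-scored group mean (subject
    # included); the lower bound uses the leave-one-out group mean instead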
zmn = np.mean(scipy.stats.zscore(df, axis=1), axis=0)
ceil = np.mean([np.corrcoef(subj,zmn)[0,1] for subj in df])
rng = np.arange(df.shape[0])
floor = []
for s, subj in enumerate(df):
mn = np.mean(df[rng!=s], axis=0)
floor.append(np.corrcoef(subj,mn)[0,1])
floor = np.mean(floor)
return floor, ceil
class Compare(base.Compare):
def __init__(self, *args):
super(Compare, self).__init__(*args)
def dis_group(self):
return self.compare(pref='dis_group')
def dis_group_diff(self):
return self.compare(pref='dis_group_diff', ylim=[-.4,.4])
def report(**kwargs):
html = kwargs['html']
html.writeh('HOP2008', h='h1')
# html.writeh('Clustering', h='h2')
#
# kwargs['layers'] = 'all'
# kwargs['task'] = 'run'
# kwargs['func'] = 'dis_group'
# myexp = HOP2008(**kwargs)
# for depth, model_name in myexp.models:
# if depth != 'shallow':
# myexp.set_model(model_name)
# myexp.dis_group()
#
# kwargs['layers'] = 'output'
# kwargs['task'] = 'compare'
# kwargs['func'] = 'dis_group_diff'
# myexp = HOP2008(**kwargs)
# Compare(myexp).dis_group_diff()
html.writeh('MDS', h='h2')
kwargs['layers'] = 'output'
kwargs['task'] = 'run'
kwargs['func'] = 'mds'
myexp = HOP2008(**kwargs)
for name in ['px', 'shape', 'googlenet']:
myexp.set_model(name)
myexp.mds()
html.writeh('Correlation', h='h2')
kwargs['layers'] = 'all'
kwargs['task'] = 'run'
kwargs['func'] = 'corr'
myexp = HOP2008(**kwargs)
for depth, model_name in myexp.models:
if depth != 'shallow':
myexp.set_model(model_name)
myexp.corr()
kwargs['layers'] = 'output'
kwargs['task'] = 'compare'
kwargs['func'] = 'corr'
kwargs['force'] = False
kwargs['forcemodels'] = False
myexp = HOP2008(**kwargs)
Compare(myexp).corr()
| gpl-3.0 |
qiime2-plugins/normalize | q2_feature_table/_summarize/_visualizer.py | 1 | 11251 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
import shutil
import biom
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from q2_types.feature_data import DNAIterator
import q2templates
import skbio
import qiime2
import json
from ._vega_spec import vega_spec
_blast_url_template = ("http://www.ncbi.nlm.nih.gov/BLAST/Blast.cgi?"
"ALIGNMENT_VIEW=Pairwise&PROGRAM=blastn&DATABASE"
"=nt&CMD=Put&QUERY=%s")
TEMPLATES = pkg_resources.resource_filename('q2_feature_table', '_summarize')
def tabulate_seqs(output_dir: str, data: DNAIterator) -> None:
sequences = []
seq_lengths = []
with open(os.path.join(output_dir, 'sequences.fasta'), 'w') as fh:
for sequence in data:
skbio.io.write(sequence, format='fasta', into=fh)
str_seq = str(sequence)
seq_len = len(str_seq)
sequences.append({'id': sequence.metadata['id'],
'len': seq_len,
'url': _blast_url_template % str_seq,
'seq': str_seq})
seq_lengths.append(seq_len)
seq_len_stats = _compute_descriptive_stats(seq_lengths)
_write_tsvs_of_descriptive_stats(seq_len_stats, output_dir)
index = os.path.join(TEMPLATES, 'tabulate_seqs_assets', 'index.html')
q2templates.render(index, output_dir, context={'data': sequences,
'stats': seq_len_stats})
js = os.path.join(
TEMPLATES, 'tabulate_seqs_assets', 'js', 'tsorter.min.js')
os.mkdir(os.path.join(output_dir, 'js'))
shutil.copy(js, os.path.join(output_dir, 'js', 'tsorter.min.js'))
def summarize(output_dir: str, table: biom.Table,
sample_metadata: qiime2.Metadata = None) -> None:
number_of_features, number_of_samples = table.shape
sample_summary, sample_frequencies = _frequency_summary(
table, axis='sample')
if number_of_samples > 1:
# Calculate the bin count, with a minimum of 5 bins
IQR = sample_summary['3rd quartile'] - sample_summary['1st quartile']
if IQR == 0.0:
bins = 5
else:
# Freedman–Diaconis rule
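            # bin width = 2 * IQR / n_samples ** (1/3); the bin count is the
            # frequency range divided by that width, but never fewer than 5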
bin_width = (2 * IQR) / (number_of_samples ** (1/3))
bins = max((sample_summary['Maximum frequency'] -
sample_summary['Minimum frequency']) / bin_width, 5)
sample_frequencies_ax = sns.distplot(sample_frequencies, kde=False,
rug=True, bins=int(round(bins)))
sample_frequencies_ax.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
sample_frequencies_ax.set_xlabel('Frequency per sample')
sample_frequencies_ax.set_ylabel('Number of samples')
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.pdf'))
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.png'))
plt.gcf().clear()
feature_summary, feature_frequencies = _frequency_summary(
table, axis='observation')
if number_of_features > 1:
feature_frequencies_ax = sns.distplot(feature_frequencies, kde=False,
rug=False)
feature_frequencies_ax.set_xlabel('Frequency per feature')
feature_frequencies_ax.set_ylabel('Number of features')
feature_frequencies_ax.set_xscale('log')
feature_frequencies_ax.set_yscale('log')
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.pdf'))
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.png'))
sample_summary_table = q2templates.df_to_html(
sample_summary.apply('{:,}'.format).to_frame('Frequency'))
feature_summary_table = q2templates.df_to_html(
feature_summary.apply('{:,}'.format).to_frame('Frequency'))
index = os.path.join(TEMPLATES, 'summarize_assets', 'index.html')
context = {
'number_of_samples': number_of_samples,
'number_of_features': number_of_features,
'total_frequencies': int(np.sum(sample_frequencies)),
'sample_summary_table': sample_summary_table,
'feature_summary_table': feature_summary_table,
}
feature_qualitative_data = _compute_qualitative_summary(table)
sample_frequencies.sort_values(inplace=True, ascending=False)
feature_frequencies.sort_values(inplace=True, ascending=False)
sample_frequencies.to_csv(
os.path.join(output_dir, 'sample-frequency-detail.csv'))
feature_frequencies.to_csv(
os.path.join(output_dir, 'feature-frequency-detail.csv'))
feature_frequencies = feature_frequencies.astype(int) \
.apply('{:,}'.format).to_frame('Frequency')
feature_frequencies['# of Samples Observed In'] = \
pd.Series(feature_qualitative_data).astype(int).apply('{:,}'.format)
feature_frequencies_table = q2templates.df_to_html(feature_frequencies)
sample_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'sample-frequency-detail.html')
feature_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'feature-frequency-detail.html')
context.update({'max_count': sample_frequencies.max(),
'feature_frequencies_table': feature_frequencies_table,
'feature_qualitative_data': feature_qualitative_data,
'tabs': [{'url': 'index.html',
'title': 'Overview'},
{'url': 'sample-frequency-detail.html',
'title': 'Interactive Sample Detail'},
{'url': 'feature-frequency-detail.html',
'title': 'Feature Detail'}]})
# Create a JSON object containing the Sample Frequencies to build the
# table in sample-frequency-detail.html
sample_frequencies_json = sample_frequencies.to_json()
templates = [index, sample_frequency_template, feature_frequency_template]
context.update({'frequencies_list':
json.dumps(sorted(sample_frequencies.values.tolist()))})
if sample_metadata is not None:
context.update({'vega_spec':
json.dumps(vega_spec(sample_metadata,
sample_frequencies
))
})
context.update({'sample_frequencies_json': sample_frequencies_json})
q2templates.util.copy_assets(os.path.join(TEMPLATES,
'summarize_assets',
'vega'),
output_dir)
q2templates.render(templates, output_dir, context=context)
def _compute_descriptive_stats(lst: list):
"""Basic descriptive statistics and a (parametric) seven-number summary.
Calculates descriptive statistics for a list of numerical values, including
count, min, max, mean, and a parametric seven-number-summary. This summary
includes values for the lower quartile, median, upper quartile, and
percentiles 2, 9, 91, and 98. If the data is normally distributed, these
seven percentiles will be equally spaced when plotted.
Parameters
----------
lst : list of int or float values
Returns
-------
dict
a dictionary containing the following descriptive statistics:
count
int: the number of items in `lst`
min
int or float: the smallest number in `lst`
max
int or float: the largest number in `lst`
mean
float: the mean of `lst`
range
int or float: the range of values in `lst`
std
float: the standard deviation of values in `lst`
seven_num_summ_percentiles
list of floats: the parameter percentiles used to calculate this
seven-number summary: [2, 9, 25, 50, 75, 91, 98]
seven_num_summ_values
list of floats: the calculated percentile values of the summary
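    Examples
    --------
    An illustrative call on a small, arbitrary list (the seven-number values
    are omitted because they depend on pandas' percentile interpolation):
    >>> stats = _compute_descriptive_stats([1, 2, 3, 4, 5])
    >>> stats['count']
    5
    >>> float(stats['min']), float(stats['max']), float(stats['mean'])
    (1.0, 5.0, 3.0)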
"""
# NOTE: With .describe(), NaN values in passed lst are excluded by default
if len(lst) == 0:
raise ValueError('No values provided.')
seq_lengths = pd.Series(lst)
seven_num_summ_percentiles = [0.02, 0.09, 0.25, 0.5, 0.75, 0.91, 0.98]
descriptive_stats = seq_lengths.describe(
percentiles=seven_num_summ_percentiles)
return {'count': int(descriptive_stats.loc['count']),
'min': descriptive_stats.loc['min'],
'max': descriptive_stats.loc['max'],
'range': descriptive_stats.loc['max'] -
descriptive_stats.loc['min'],
'mean': descriptive_stats.loc['mean'],
'std': descriptive_stats.loc['std'],
'seven_num_summ_percentiles': seven_num_summ_percentiles,
'seven_num_summ_values': descriptive_stats.loc['2%':'98%'].tolist()
}
def _write_tsvs_of_descriptive_stats(dictionary: dict, output_dir: str):
descriptive_stats = ['count', 'min', 'max', 'mean', 'range', 'std']
stat_list = []
for key in descriptive_stats:
stat_list.append(dictionary[key])
descriptive_stats = pd.DataFrame(
{'Statistic': descriptive_stats, 'Value': stat_list})
descriptive_stats.to_csv(
os.path.join(output_dir, 'descriptive_stats.tsv'),
sep='\t', index=False, float_format='%g')
seven_number_summary = pd.DataFrame(
{'Quantile': dictionary['seven_num_summ_percentiles'],
'Value': dictionary['seven_num_summ_values']})
seven_number_summary.to_csv(
os.path.join(output_dir, 'seven_number_summary.tsv'),
sep='\t', index=False, float_format='%g')
def _compute_qualitative_summary(table):
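    """Return, for each feature, the number of samples it is observed in."""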
table = table.transpose()
sample_count = {}
for count_vector, feature_id, _ in table.iter():
sample_count[feature_id] = (count_vector != 0).sum()
return sample_count
def _frequencies(table, axis):
return pd.Series(data=table.sum(axis=axis), index=table.ids(axis=axis))
def _frequency_summary(table, axis='sample'):
frequencies = _frequencies(table, axis=axis)
summary = pd.Series([frequencies.min(), frequencies.quantile(0.25),
frequencies.median(), frequencies.quantile(0.75),
frequencies.max(), frequencies.mean()],
index=['Minimum frequency', '1st quartile',
'Median frequency', '3rd quartile',
'Maximum frequency', 'Mean frequency'])
return summary, frequencies
| bsd-3-clause |
ycaihua/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
gclenaghan/scikit-learn | sklearn/svm/classes.py | 7 | 40216 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' penalty leads to
        `coef_` vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
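    Examples
    --------
    A minimal illustrative fit on toy data (the data below are arbitrary and
    only meant to show the basic fit/predict calls):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVC
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = LinearSVC(random_state=0).fit(X, y)
    >>> clf.predict([[-0.8, -1]])
    array([1])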
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss, while 'squared_epsilon_insensitive' is the square of that
        loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
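    Examples
    --------
    A minimal illustrative fit on random data (the data are arbitrary, so only
    the shape of the learned coefficients is shown):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVR
    >>> np.random.seed(0)
    >>> X = np.random.randn(10, 5)
    >>> y = np.random.randn(10)
    >>> reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
    >>> reg.coef_.shape
    (5,)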
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
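# --- Editor's note: illustrative usage sketch, not part of the original scikit-learn module ---
# Unlike SVR and NuSVR above, the OneClassSVM docstring carries no Examples
# section, so here is a minimal, hedged sketch of typical novelty-detection
# usage; the data and parameter values below are made up for illustration.
#
#     >>> import numpy as np
#     >>> from sklearn.svm import OneClassSVM
#     >>> rng = np.random.RandomState(0)
#     >>> X_train = 0.3 * rng.randn(100, 2)              # "normal" observations
#     >>> X_new = np.array([[0.1, -0.2], [4.0, 4.0]])    # likely inlier, likely outlier
#     >>> clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1).fit(X_train)
#     >>> clf.predict(X_new)            # +1 for inliers, -1 for outliers
#     >>> clf.decision_function(X_new)  # signed distance to the learned boundary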
| bsd-3-clause |
nikitaswinnen/model-for-predicting-rapid-response-team-events | Data Science Notebooks/Pipeline/SRC/my_impala_utils.py | 1 | 1569 | import pandas as pd
import numpy as np
def create_modeling_table(df, table_name, schema, CUR, drop_if_exists=True):
"""
    Input: df (pandas dataframe), table_name (string), schema (dict), CUR (Impala connection cursor obj), drop_if_exists (boolean)
    Output: None
    Description: Converts the schema dict's key, value pairs into a SQL formatted schema and creates the table (dropping an existing table first when drop_if_exists is True).
"""
# First ensure that the schema columns are in the same order as the dataframe columns
ordered_pairs = []
for col_name in df.columns:
ordered_pairs.append(str(col_name) + ' ' + str(schema[col_name]))
sql_formatted_schema = str(tuple(ordered_pairs)).replace("'", "")
sql_schema = "CREATE TABLE {0} {1}".format(table_name, sql_formatted_schema)
if drop_if_exists:
CUR.execute("DROP TABLE IF EXISTS {0}".format(table_name))
CUR.execute(sql_schema)
print "{0} successfully created!".format(table_name)
print "\n"
return None
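# --- Editor's note: illustrative usage sketch, not part of the original module ---
# A minimal, hedged example of calling create_modeling_table, assuming an
# Impala/Hive cursor named CUR is already open; the dataframe, schema and
# table name below are hypothetical.
#
#     import pandas as pd
#     df = pd.DataFrame({'encounter_id': [1, 2], 'heart_rate': [72.0, 110.5]})
#     schema = {'encounter_id': 'INT', 'heart_rate': 'DOUBLE'}
#     create_modeling_table(df, 'rrt_modeling_table', schema, CUR)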
def insert_pandas_to_impala(df, table_name, CUR):
"""
    Input: df (pandas dataframe), table_name (string), CUR (Impala connection cursor obj)
Output: None
Description: Formats an Impala query to bulk insert all of the dataframe rows into a Hive table.
"""
insert_query = "INSERT INTO {0} VALUES".format(table_name)
for ix, row in enumerate(df.as_matrix()):
if ix == len(df.as_matrix()) - 1:
insert_query += str(tuple(row))
else:
insert_query += str(tuple(row)) + ", "
CUR.execute(insert_query)
print "{0} rows inserted into Hive table {1}".format(ix + 1, table_name)
print "\n"
return None | apache-2.0 |
merenlab/web | data/sar11-saavs/files/p-get_percent_identity.py | 1 | 14809 | '''Loop over a bam file and get the edit distance to the reference genome stored in the NM tag,
scaled by aligned read length. Works for bowtie2, maybe others. Adapted from
https://gigabaseorgigabyte.wordpress.com/2017/04/14/getting-the-edit-distance-from-a-bam-alignment-a-journey/'''
import sys
import pysam
import argparse
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
ap = argparse.ArgumentParser()
ap.add_argument("-b", required=False, help="bam filepaths comma separated (no spaces)")
ap.add_argument("-B", required=False, help="alternatively, a file of bam filepaths")
ap.add_argument("-p", required=False, action='store_true', help="provide if you want proper percent identity (unaligned bps are included in normalization)")
ap.add_argument("-u", required=False, action='store_true', help="if provided, histograms will be unnormalized")
ap.add_argument("-o", required=True, help="output filepath")
ap.add_argument("-r", required=False, default=None, help="If provided, only reads for specified references are considered. Comma separated.")
ap.add_argument("-R", required=False, default=None, help="If provided, only reads for specified references are considered. Filepath of list of references.")
ap.add_argument("-g", required=False, help="gene caller ids to report, comma-separated. requires -a flag")
ap.add_argument("-G", required=False, help="filepath of gene caller ids to report, single column file. requires -a flag")
ap.add_argument("-x", required=False, action='store_true', help="collapse histograms of individual genes into a single histogram per bam file. Valid only with -g and -G")
ap.add_argument("-a", required=False, help="output file of anvi-export-gene-calls, required for -g and -G, incompatible with -R")
ap.add_argument("-m", required=False, action='store_true', help="If provided, histogram will only be generated for the MEDIAN read length in each bam file. May want to use with -nummismatches")
ap.add_argument("-nummismatches", required=False, action='store_true', help="If provided, values are number of mismatches instead of percent identity. Highly recommended to use this with -m flag")
ap.add_argument("-mode", required=False, default='histogram', help="by default, this program reports histogram curves (-mode histogram). If ``-mode raw``, histograms are not computed and instead each read considered is a written with its percent identity value")
# These are parameters for mode = 'histogram'
ap.add_argument("-binsize", required=False, type=float, default=None, help="Size of each bin. Overrides behavior of -numbins")
ap.add_argument("-autobin", required=False, action='store_true', help="Size of each bin determined on a per bam basis (creates one bin for each mismatch, but you still must supply a -range value). -m is required for this mode")
ap.add_argument("-numbins", required=False, default=None, help="How many bins? default is 30")
ap.add_argument("-interpolate", default=None, required=False, help="How many points should form the interpolation and from where to where? Format is 'start,end,number'. (e.g. 67,100,200) If not provided, no interpolation. Required for autobin to normalize x-axis between bam files")
ap.add_argument("-range", required=False, default=None, help="What's the range? provide lower and upper bound comma-separated. default is 50,100")
ap.add_argument("-melted", required=False, action='store_true', help="Use melted output format. Required if using -autobin since each bam could have different bins")
# These are parameters for mode = 'raw'
ap.add_argument("-subsample", required=False, type=int, default=None, help="how many reads do you want to subsample? You probably don't need more than 50,000.")
args = vars(ap.parse_args())
sc = lambda parameters: any([args.get(parameter, False) for parameter in parameters])
raw_mode_parameters = ['subsample']
histogram_mode_parameters = ['binsize', 'autobin', 'numbins', 'interpolate', 'range', 'melted']
if args.get("mode") == 'histogram':
if sc(raw_mode_parameters):
        raise Exception("You are using mode = histogram; don't use these parameters: {}".format(raw_mode_parameters))
# defaults
if not args.get("numbins"):
args['numbins'] = 30
if not args.get("range"):
args['range'] = "50,100"
if args.get("mode") == 'raw':
if sc(histogram_mode_parameters):
        raise Exception("You are using mode = raw; don't use these parameters: {}".format(histogram_mode_parameters))
# checks
if args.get("g") and args.get("G"):
raise Exception("use -g or -G")
if (args.get("g") or args.get("G")) and not args.get("a"):
raise Exception("provide -a")
if (args.get("g") or args.get("G")) and (args.get("R") or args.get("r")):
raise Exception("no point providing -R/-r if using gene calls")
if args.get("x") and not (args.get('g') or args.get('G')):
raise Exception("you need to specify -g or -G to use -x")
if args.get("b") and args.get('B'):
raise Exception("specify either b or B, not both")
if not args.get("b") and not args.get('B'):
raise Exception("Specify one of b or B")
if args.get('autobin') and not args.get("melted"):
raise Exception("You can't autobin without using -melted output format.")
if args.get('autobin') and not args.get("m"):
raise Exception("You can't autobin without using -m.")
if args.get('autobin') and not args.get("interpolate"):
raise Exception("You can't autobin without using -interpolate.")
if args.get("g"):
genes = [int(x) for x in args['g'].split(',')]
elif args.get("G"):
print(args['G'])
genes = [int(x.strip()) for x in open(args['G']).readlines()]
else:
genes = None
if args.get("a"):
gene_info = pd.read_csv(args['a'], sep='\t')
if genes:
gene_info = gene_info[gene_info['gene_callers_id'].isin(genes)]
else:
gene_info = None
if args.get('b'):
bam_filepaths = args['b'].split(",") # comma separated bam file paths
if bam_filepaths[-1] == '':
bam_filepaths = bam_filepaths[:-1]
elif args.get('B'):
bam_filepaths = [x.strip() for x in open(args['B']).readlines()]
if not bam_filepaths:
print('no bam files provided. nothing to do.')
sys.exit()
proper_pident = args['p']
output = args['o']
if args.get('r'):
reference_names = args['r'].split(",") # comma separated reference names
elif args.get('R'):
reference_names = [x.strip() for x in open(args['R']).readlines()]
else:
reference_names = [None]
normalize = True if not args['u'] else False
############################################################################################################################
# preamble
attr = 'query_alignment_length' if not proper_pident else 'query_length'
if args.get('mode') == 'histogram':
range_low, range_hi = [int(x) for x in args['range'].split(",")]
if not args.get("autobin"):
if not args.get("binsize"):
bins_template = np.linspace(range_low, range_hi, int(args['numbins']), endpoint=True)
else:
bins_template = np.arange(range_hi, range_low - float(args['binsize']), -float(args['binsize']))[::-1]
if not args.get("nummismatches"):
bins_shifted = bins_template[1:] # include 100% as a datapoint
else:
bins_shifted = bins_template[:-1] # include 0 mismatches as a datapoint
if args.get("interpolate"):
interp_low, interp_hi, interp_num = [int(x) for x in args['interpolate'].split(",")]
interp_low += 0.5
bins_interp = np.linspace(interp_low, interp_hi, interp_num)
else:
bins_interp = np.array([])
I = lambda bins_shifted, counts, bins_interp: interp1d(bins_shifted, counts, kind='cubic')(bins_interp) if args['interpolate'] else counts
J = lambda true_or_false, length: length == median_length if true_or_false else True
K = lambda true_or_false, read: read.get_tag("NM") if true_or_false else 100*(1 - float(read.get_tag("NM")) / read.__getattribute__(attr))
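# Editorial worked example of K above (illustrative numbers): a read with
# NM = 3 mismatches over an aligned length of 100 bp gives
# 100 * (1 - 3 / 100.) = 97.0 percent identity, or simply 3 when
# -nummismatches is provided. With -p, query_length replaces
# query_alignment_length, so unaligned bases also count in the denominator.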
if args.get("mode") == 'histogram':
percent_identity_hist = {'value':[], 'percent_identity':[], 'id':[]} if args.get("melted") else {}
if args.get("mode") == 'raw':
percent_identity_hist = {'value':[], 'id':[]}
for bam in bam_filepaths:
print('working on {}...'.format(bam))
bam_name = bam.split("/")[-1].replace(".bam", "")
samfile = pysam.AlignmentFile(bam, "rb")
if args.get("m"):
i = 0
read_lengths = []
for read in samfile.fetch():
read_lengths.append(read.query_length)
i += 1
array = np.array(read_lengths)
median_length = int(np.median(array))
second_median = int(np.median(array[array != float(median_length)]))
third_median = int(np.median(array[(array != float(median_length)) & (array != float(second_median))]))
print('median length was {}, second was {}, third was {}'.format(median_length, second_median, third_median))
# only if autobinning
if args.get("autobin"):
if not args.get("nummismatches"):
binsize = 100*(1. / median_length)
bins_template = np.arange(range_hi, range_low - binsize, -binsize)[::-1]
bins_template += binsize * 1e-4
bins_template[-1] = 100
bins_shifted = bins_template[1:] # include 100% as a datapoint
else:
bins_template = np.arange(range_hi, range_low - 1, -1)[::-1]
bins_shifted = bins_template[:-1] # include 0 mismatches as a datapoint
if gene_info is not None:
if not args.get('x'):
# each gene gets its own histogram
for index, row in gene_info.iterrows():
percent_identities = np.array([K(args.get("nummismatches"), read) for read in samfile.fetch(row['contig'], int(row['start']), int(row['stop'])) if J(args.get("m"), read.query_length)])
id_name = bam_name + "_" + str(row['gene_callers_id'])
if args.get("mode") == 'histogram':
counts, _ = np.histogram(percent_identities, bins=bins_template, density=normalize)
if args.get("melted"):
value = list(I(bins_shifted, counts, bins_interp))
percent_identity = list(bins_shifted if not args.get("interpolate") else bins_interp)
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
percent_identity_hist['percent_identity'].extend(percent_identity)
else:
percent_identity_hist[id_name] = I(bins_shifted, counts, bins_interp)
elif args.get("mode") == 'raw':
if args.get("subsample") and args['subsample'] < len(percent_identities):
percent_identities = np.random.choice(percent_identities, args.get('subsample'), replace=False)
value = percent_identities.tolist()
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
else:
# histograms for each gene are collapsed into a single histogram
percent_identities = []
for index, row in gene_info.iterrows():
percent_identities.extend([K(args.get("nummismatches"), read) for read in samfile.fetch(row['contig'], int(row['start']), int(row['stop'])) if J(args.get("m"), read.query_length)])
percent_identities = np.array(percent_identities)
id_name = bam_name
if args.get("mode") == 'histogram':
counts, _ = np.histogram(percent_identities, bins=bins_template, density=normalize)
if args.get("melted"):
value = list(I(bins_shifted, counts, bins_interp))
percent_identity = list(bins_shifted if not args.get("interpolate") else bins_interp)
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
percent_identity_hist['percent_identity'].extend(percent_identity)
else:
percent_identity_hist[id_name] = I(bins_shifted, counts, bins_interp)
elif args.get("mode") == 'raw':
if args.get("subsample") and args['subsample'] < len(percent_identities):
percent_identities = np.random.choice(percent_identities, args.get('subsample'), replace=False)
value = percent_identities.tolist()
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
else:
percent_identities = []
for reference_name in reference_names:
# 1 minus the ratio of mismatches to the length of the read/alignment
percent_identities.extend([K(args.get("nummismatches"), read) for read in samfile.fetch(reference=reference_name) if J(args.get("m"), read.query_length)])
percent_identities = np.array(percent_identities)
print("{} reads considered".format(len(percent_identities)))
# create a histogram
id_name = bam_name
if args.get("mode") == 'histogram':
counts, _ = np.histogram(percent_identities, bins=bins_template, density=normalize)
if args.get("melted"):
value = list(I(bins_shifted, counts, bins_interp))
percent_identity = list(bins_shifted if not args.get("interpolate") else bins_interp)
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
percent_identity_hist['percent_identity'].extend(percent_identity)
else:
percent_identity_hist[id_name] = I(bins_shifted, counts, bins_interp)
if args.get("mode") == 'raw':
if args.get("subsample") and args['subsample'] < len(percent_identities):
percent_identities = np.random.choice(percent_identities, args.get('subsample'), replace=False)
value = percent_identities.tolist()
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
samfile.close()
print("")
# save the file
percent_identity_hist = pd.DataFrame(percent_identity_hist).reset_index(drop=True)
if not args.get("melted") and args.get("mode") == 'histogram':
percent_identity_hist['percent_identity' if not args['nummismatches'] else 'number_of_mismatches'] = bins_interp if args['interpolate'] else bins_shifted
percent_identity_hist.to_csv(output, sep="\t", index=False)
| mit |
jms-dipadua/financial-forecasting | forecast_main.py | 1 | 20208 | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import yaml
#import h5py
from sklearn import svm
from sklearn.metrics import f1_score, accuracy_score, mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split, StratifiedKFold, KFold
from sklearn.learning_curve import learning_curve
from sklearn.grid_search import GridSearchCV
from sklearn.externals import joblib
from keras.models import Sequential, model_from_yaml
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
#from keras.utils.visualize_util import plot
#import pydot
#import graphviz
class Company:
def __init__(self):
self.get_params()
self.read_file()
self.initial_data_drop()
self.gen_train_test()
def get_params(self):
print "welcome to the jungle."
self.base_file = raw_input("RAW COMPANY file: ") # base file
self.root_dir = 'data/working/v5/' # version directory
self.fin_dir = 'data/outputs/v5/' # version directory
self.experiment_version = raw_input("Experiment Version: ")
self.fin_file_name = self.fin_dir + self.experiment_version +'.csv' # --> USE THE ROOT EXP FOR FILES? OR JUST ONE OUPUT?
self.pl_file_name = self.fin_dir + self.experiment_version +'_pl.csv'
def read_file(self):
print "reading file"
self.raw_data = pd.read_csv(self.root_dir+self.base_file)
def initial_data_drop(self):
self.raw_data2 = self.raw_data
print "initial_data_drop (IDs & Dates)"
columns = list(self.raw_data.columns.values)
if 'id' in columns:
self.raw_data = self.raw_data.drop(['id'], axis=1)
if 'Volume' in columns:
self.raw_data = self.raw_data.drop(['Volume'], axis=1)
if 'Date' in columns:
self.raw_dates = self.raw_data['Date']
#print self.raw_dates
self.raw_data = self.raw_data.drop(['Date'], axis=1)
# the following section is for experiment customization: ie selection of which inputs to keep or drop
columns = list(self.raw_data.columns.values)
#drop_cols = []
#drop_col_nums =[] # use this so i can make it more reliable for future experiments (i.e. when dropping the same columns across different companies)
counter = 0
# get the columns to keep (manual version)
"""
drop_cols = []
drop_col_nums = []
for column in columns:
print "Keep (1) or DROP (0): %r" % column
if int(raw_input()) == 0:
drop_cols.append(column)
drop_col_nums.append(counter)
counter += 1
print drop_cols # so i can keep track of this for experiment documentation purposes
print drop_col_nums
"""
# v5-1
#drop_cols = ['DGS10', 'DCOILBRENTEU', 'xCIVPART', 'UNRATE', 'CPIAUCSL', 'GFDEGDQ188S', 'HOUST', 'IC4WSA', 'USD3MTD156N', 'PCE', 'PSAVERT', 'xA191RL1Q225SBEA', 'spClose', 'DEXUSEU', 'EPS', '12mo-EPS', 'net_income', 'total_assets', 'total_revenue', 'free_cash_flow', 'total_liabilities', 'profit_margin']
#drop_col_nums = [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
# v5-2
#drop_cols = ['Open', 'High', 'Low', 'SMA-5', 'SMA-15', 'SMA-50', 'SMA-200', 'WMA-10', 'WMA-30', 'WMA-100', 'WMA-200', 'cci-20', 'rsi-14']
#drop_col_nums = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
#self.raw_data.drop(self.raw_data.columns[drop_col_nums], axis = 1, inplace=True)
# v5-3 ("all params")
print list(self.raw_data.columns.values) # again for documentation purposes
def gen_train_test(self):
# split timeseries
print "generating x_train, y_train, x_test"
data_shape = self.raw_data.shape[0]
print "data_shape of raw_data: %r" % data_shape
train_len = int(round(data_shape * .9)) # get 90% of data for train
print "train_len of raw_data: %r" % train_len
# get rid of any NaN that may have appeared in there
self.raw_data.replace(to_replace = np.nan, value = 0, inplace=True)
X_train = self.raw_data.ix[0:train_len-1, :] # one less than train_len; train_len will be start of test
X_train2 = self.raw_data2.ix[0:train_len-1, :]
# last row of data set won't have a prior-day but that's okay, we can just drop it (since there's no way to validate it)
X_test = self.raw_data.ix[train_len:data_shape-2, :] # ones less than data_shape because of 0-index + row dropping
X_test2 = self.raw_data2.ix[train_len:data_shape-2, :]
# generate / extract y_vals : y_train & y_valid
# first row has no prior-day information but day 2 is its y-val
y_vals_raw = self.raw_data.loc[:,['Close']]
# RENAME AXIS
y_vals_raw.rename(columns={'Close': 'nxtDayClose'}, inplace=True)
# zero indexing takes care of needing to manipulate by one here
# drop first day because need "day +1" close price as the "train target" based on "day's feature inputs"
y_train = y_vals_raw.ix[1:train_len]
y_test = y_vals_raw.ix[train_len+1:data_shape-1, :] # but as with X_test, we will later drop the last row
# && drop last row from y_valid
#y_valid = y_valid.iloc[0:y_valid.shape[0]-1] # as above. awkward.
self.X_train = X_train
self.X_train2 = X_train2
# also do checks on head / tail
# to test head, swap head for tail
# commented out when not testing
#print self.X_train.tail(5)
self.y_train = y_train.as_matrix() # make sure they're matrix/vectors
#print self.y_train[-5:-1]
self.X_test = X_test
self.X_test2 = X_test2
#print self.X_test.tail(5)
self.y_test = y_test.as_matrix()
#print self.y_valid[-1]
self.y_dates = self.raw_dates.ix[train_len+1:data_shape-1].as_matrix()
# last step is to generate a cross-validation set
# since we're in time series, we can't randomize (hence this process and not sci-kit...)
# we'll dedicate 90% of data set to train, 10% to cross-validation
data_shape = self.X_train.shape[0]
train_len = int(round(data_shape * .9))
X_train = self.X_train[0: train_len - 1]
X_cv = self.X_train[train_len: data_shape]
self.X_train = X_train
self.X_cv = X_cv
y_train = self.y_train[0: train_len-1]
y_cv = self.y_train[train_len: data_shape]
self.y_train = y_train
self.y_cv = y_cv
print "shapes of final train/tests: \n x_train: %r \n y_train: %r \n x_cv: %r \n y_cv: %r \n x_test: %r \n y_test: %r" % (X_train.shape, y_train.shape, X_cv.shape, y_cv.shape, X_test.shape, y_test.shape)
return
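# Editorial worked example of the walk-forward split above (hypothetical sizes):
# with data_shape = 1000 rows, train_len = round(1000 * .9) = 900, so X_train
# covers rows 0-899 and X_test rows 900-998 (the final row is dropped because it
# has no next-day close to predict against). Targets are the 'Close' column
# shifted forward by one day, and the last 10% of X_train is then carved off
# chronologically as the cross-validation set.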
class Forecast:
def __init__(self):
self.company = Company()
self.basic_vis()
self.pre_process_data() #v1.x-ish: scaling, PCA, etc
self.svm() # uses self.company.X_train/test, etc
self.ann() # uses self.company.X_train/test, etc
# self.ensemble() # v1.x
        self.svm_decisions, self.svm_gain_loss = self.decisions(self.svm_preds) # this has to output // generate a notion of "shares held"
        self.ann_decisions, self.ann_gain_loss = self.decisions(self.ann_preds) # this has to output // generate a notion of "shares held"
self.buy_hold_prof_loss()
self.profit_loss_rollup()
self.write_final_file()
def pre_process_data(self):
# some STRUCTURE and CLEAN UP
# convert to numpy and numbers
self.company.y_train = np.array(self.company.y_train[0:,0]) # need to recast for some reason...
self.company.y_train = self.company.y_train.astype(float)
# so do y_valid too
self.company.y_test = np.array(self.company.y_test[0:,0])
# company.y_valid is an object..not sure...but this converts it
self.company.y_test = self.company.y_test.astype(float)
#print self.company.y_valid.dtype
# SCALE input values ...not sure if i should do the target...
scaler = StandardScaler()
self.daily_highs = self.company.X_test2['High']
self.daily_lows = self.company.X_test2['Low']
self.company.X_train = scaler.fit_transform(self.company.X_train)
        # fit the scaler on the training data only, then reuse that fit for test and CV to avoid leakage
        self.company.X_test = scaler.transform(self.company.X_test)
        self.company.X_cv = scaler.transform(self.company.X_cv)
# make true train and CV split
#self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.company.X_train, self.company.y_train, test_size=0.33, random_state=42)
return
def basic_vis(self):
        # TODO :: shift so that it's not a correlation with the OPEN but with the CLOSE (since that's the dependent var)
correlations = self.company.X_train.corr() # uses pandas built in correlation
# Generate a mask for the upper triangle (cuz it's just distracting)
mask = np.zeros_like(correlations, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
plt.title("Feature Correlations")
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(correlations, mask=mask, cmap=cmap, vmax=.3,
square=False, xticklabels=3, yticklabels=True,
linewidths=.6, cbar_kws={"shrink": .5}, ax=ax)
plt.yticks(rotation=0)
#plt.show()
f.savefig(self.company.fin_dir + '/correlation-images/' + self.company.experiment_version+'.png')
def svm(self):
# for regression problems, scikitlearn uses SVR: support vector regression
        C_range = np.logspace(0, 4, 6) # normally 12; doing 6 for now due to run-time length
        #print C_range
        gamma_range = np.logspace(-5, 1, 6) # normally 12; doing 6 for now due to run-time length
#print gamma_range
param_grid = dict(gamma=gamma_range, C=C_range)
# based on LONG test with the gridsearch (see notes) for v4b-5
        # the numbers below are rounded
#param_grid = dict(C=[432876], gamma=[1.8738])
## probably want to introduce max iterations...
grid = GridSearchCV(svm.SVR(kernel='rbf', verbose=True), param_grid=param_grid, cv=2, scoring = 'mean_squared_error')
grid.fit(self.company.X_train, self.company.y_train)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
self.svm_preds = grid.predict(self.company.X_test)
# this is for repeating or one-off specific experiments
#self.svm_C = float(raw_input("input C val: "))
#self.svm_gamma = float(raw_input("input gamma val: "))
#regression = svm.SVR(kernel='rbf', C=self.svm_C, gamma=self.svm_gamma, verbose=True)
#regression.fit(self.X_train, self.y_train)
#self.svm_preds = regression.predict(self.company.X_test)
#print self.svm_preds
self.svm_mse_cv = grid.score(self.company.X_cv, self.company.y_cv)
print "(cv) Mean Squared Error: %f" % self.svm_mse_cv
        self.svm_mse_test = grid.score(self.company.X_test, self.company.y_test)
print "(test) Mean Squared Error: %f" % self.svm_mse_test
# save the parameters to a file
joblib.dump(grid.best_estimator_, self.company.fin_dir + '/svm-models/' + self.company.experiment_version +'_svm_model.pkl')
# visualize results
plt.figure()
plt.title("SVM Learning Curve: " + self.company.experiment_version)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
grid.best_estimator_, self.company.X_train, self.company.y_train, cv=5, train_sizes=[50, 100, 200, 300, 400, 500, 600])
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
alpha=0.1, color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,test_scores_mean + test_scores_std,
alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc="best")
plt.savefig(self.company.fin_dir + '/svm-learning-curves/' + self.company.experiment_version+'.png')
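        # Editorial note: the estimator persisted above with joblib.dump can later be
        # reloaded via joblib.load(<path to the saved .pkl>) to score new data
        # without re-running the grid search.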
def ann(self):
#print self.company.X_train.shape[1]
model = Sequential()
model.add(Dense(input_dim=self.company.X_train.shape[1], output_dim=50, init="glorot_uniform"))
#model.add(Activation('tanh'))
model.add(Dropout(0.1))
model.add(Dense(input_dim=50, output_dim=10, init="uniform"))
model.add(Activation('tanh'))
#model.add(Dropout(0.5))
model.add(Dense(input_dim=10, output_dim=1, init="glorot_uniform"))
model.add(Activation("linear"))
sgd = SGD(lr=0.3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer='rmsprop')
early_stopping = EarlyStopping(monitor='val_loss', patience=110)
model.fit(self.company.X_train, self.company.y_train, nb_epoch=1000, validation_split=.1, batch_size=16, verbose = 1, show_accuracy = True, shuffle = False, callbacks=[early_stopping])
self.ann_mse = model.evaluate(self.company.X_cv, self.company.y_cv, show_accuracy=True, batch_size=16)
print self.ann_mse
self.ann_preds = model.predict(self.company.X_test)
yaml_string = model.to_yaml()
with open(self.company.fin_dir + '/ann-models/' + self.company.experiment_version +'_ann_model.yml', 'w+') as outfile:
outfile.write( yaml.dump(yaml_string, default_flow_style=True) )
#model.save_weights(self.company.fin_dir + '/ann-models/' + self.company.experiment_version +'_ann_weights')
"""
nb_features = self.company.X_train.shape[1]
X_train = self.company.X_train.reshape(self.company.X_train.shape + (1, ))
X_test = self.company.X_test.reshape(self.company.X_test.shape + (1, ))
print X_train.shape
model = Sequential()
model.add(Convolution1D(nb_filter = 24, filter_length = 1, input_shape =(nb_features,1) ))
model.add(Activation("tanh"))
model.add(Dropout(0.2)) # some dropout to help w/ overfitting
model.add(Convolution1D(nb_filter = 48, filter_length= 1, subsample_length= 1))
model.add(Activation("tanh"))
model.add(Convolution1D(nb_filter = 96, filter_length= 1, subsample_length=1))
model.add(Activation("tanh"))
model.add(Dropout(0.3))
model.add(Convolution1D(nb_filter = 192, filter_length= 1, subsample_length=1))
model.add(Activation("tanh"))
model.add(Dropout(0.6))
model.add(MaxPooling1D(pool_length=2))
# flatten to add dense layers
model.add(Flatten())
#model.add(Dense(input_dim=nb_features, output_dim=50))
model.add(Dense(nb_features * 2))
model.add(Activation("tanh"))
#model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation("linear"))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer='sgd')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model.fit(X_train, self.company.y_train, nb_epoch=50, validation_split=0.25, verbose = 1, callbacks=[early_stopping])
self.ann_preds = model.predict(X_test)
"""
#print self.ann_preds
#print "Trained ANN Score: %r" % score
# visualize
#plot(model, to_file= '/ann-training/' + self.company.fin_file_name + '.png')
return
def ensemble(self):
return
def decisions(self, predictions):
        # initializations: self.shares_held = 0 & buy_price = 0
self.shares_held = 0
self.buy_price = 0
decisions = []
gain_loss = []
num_preds = predictions.shape[0]
#print "total number of predictions: %f" % num_preds
#print "shape of y_test: %f " % self.company.y_test.shape
# loop through each prediction and make a purchase decision
# uses for-i loop because i want to use the int for indexing within
for i in range(0,num_preds):
# SETUP
# the actual close value
actual_close = round(self.company.y_test[i],3)
day_high = self.daily_highs.iloc[i]
day_low = self.daily_lows.iloc[i]
# the previous close, pulled from y_train (for first row of x) and y_test
if i == 0:
prv_close = round(self.company.y_train[-1],3)
else:
prv_close = round(self.company.y_test[i-1],3)
#print "%r :: %r" % (prv_close, predictions[i])
# *have* to liquidate on the last day
if (i == num_preds -1) and (self.shares_held > 0):
                sell_price = (day_high + day_low) / 2 # midpoint of the day's high & low..."market-ish price"
gain_loss.append(sell_price * self.shares_held - self.buy_price * self.shares_held )
decisions.append("final_day_liquidation")
break
# ACTUAL DECISIONS
# buy
if predictions[i] > prv_close and self.shares_held == 0:
                # have to fabricate a buy price: using the midpoint of the day's high & low...seems sort of realistic
self.buy_price = round((day_high + day_low) / 2, 3)
self.shares_held = int(round(1000 / self.buy_price))
#print "shares purchased: %r at %r" % (self.shares_held, self.buy_price)
#print "actual close: %r :: predicted close: %r :: previous close: %r " % (actual_close, predictions[i], prv_close)
decisions.append("purchase")
# sells (stop loss)
elif (self.buy_price > prv_close) and (self.shares_held > 0):
# stop loss check; if not > 3% loss, then no change
if (prv_close / self.buy_price) < .97:
                    sell_price = (day_high + day_low) / 2 # midpoint of the day's high & low..."market-ish price"
gain_loss.append(sell_price * self.shares_held - self.buy_price * self.shares_held)
# reset holdings
self.shares_held = 0
self.buy_price = 0
decisions.append("stop_loss_sell")
else: # could do dollar cost averaging here (if wanted to get fancy)
decisions.append("Hold")
# sells (stop gain)
elif (self.buy_price < prv_close) and (self.shares_held > 0):
# stop gain check; if not > 10% gain, then no change
if (prv_close / self.buy_price) > 1.09:
                    sell_price = (day_high + day_low) / 2 # midpoint of the day's high & low..."market-ish price"
gain_loss.append(sell_price * self.shares_held - self.buy_price * self.shares_held )
self.shares_held = 0
self.buy_price = 0
decisions.append("stop_gain_sell")
else:
decisions.append("Hold")
else:
decisions.append("Hold")
#print decisions
return decisions, gain_loss
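    # Editorial worked example of the rules above (illustrative numbers): with a
    # fabricated buy price of 100.0, shares_held = int(round(1000 / 100.0)) = 10.
    # If a later previous close is 96.0, then 96.0 / 100.0 = 0.96 < 0.97 and the
    # stop-loss sells at the day's high/low midpoint; if instead the previous
    # close is 110.0, then 110.0 / 100.0 = 1.10 > 1.09 and the stop-gain sells.
    # Anything in between is recorded as a "Hold".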
def profit_loss_rollup(self):
# could output something like shares purchased / sold, cost basis & exit-price
# for now just a single line
columns = ["Profit/Loss"]
index = ["BUY-HOLD", "SVM", "ANN", "SVM-MSE-CV", "SVM-MSE-TEST", "ANN-MSE"]
self.profit_df = [self.bh_pl, np.sum(self.svm_gain_loss), np.sum(self.ann_gain_loss), self.svm_mse_cv, self.svm_mse_test, self.ann_mse]
self.profit_df = pd.DataFrame(self.profit_df, index=index, columns=columns)
print "Buy & Hold profit/loss %r" % self.bh_pl
#print self.svm_decisions
print "SVM profit/loss %r" % np.sum(self.svm_gain_loss)
#print self.ann_decisions
print "ANN profit/loss %r" % np.sum(self.ann_gain_loss)
return
def buy_hold_prof_loss(self):
# buy price somewhere (mean) between the previous two period close prices
buy_price = round((self.company.y_test[0] + self.company.y_train[-1]) / 2,3)
shares_purchased = int(round(1000/ buy_price, 0))
        # sell price somewhere (mean) between the final two period close prices
sell_price = round((self.company.y_test[-2] + self.company.y_test[-1]) /2 ,3)
self.bh_pl = sell_price * shares_purchased - buy_price * shares_purchased
return
def write_final_file(self):
columns = ['Actual', 'SVM', 'ANN', 'SVM-decisons', 'ANN-decisions']
# going to make a data frame to print to a csv
# but preds were not all in the same shape
# this helps with that and merges them all up
self.final_df = np.vstack((self.company.y_test, self.svm_preds))
self.final_df = np.transpose(self.final_df)
self.final_df = np.hstack((self.final_df, self.ann_preds))
#print self.final_df.shape
#print np.array( [self.svm_decisions] ).shape
self.final_df = np.hstack((self.final_df, np.transpose(np.array( [self.svm_decisions] )) ))
self.final_df = np.hstack((self.final_df, np.transpose(np.array( [self.ann_decisions] )) ))
self.final_df = pd.DataFrame(self.final_df, columns=columns)
self.final_df['Date'] = self.company.y_dates
final_file = self.final_df.to_csv(self.company.fin_file_name,index_label='id')
pl_fin_file = self.profit_df.to_csv(self.company.pl_file_name, index=True)
return
if __name__ == "__main__":
forecast = Forecast() | gpl-3.0 |
RPGroup-PBoC/gist_pboc_2017 | code/optical_trap_calibration_centroid.py | 1 | 6367 | # Import the necessary modules.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
# For image processing
import skimage.io
import skimage.filters
import skimage.segmentation
import skimage.measure
import skimage.morphology
# In this script, we will calibrate the strength of an optical trap. The data
# set for this script is of a one micron bead trapped in a laser optical trap
# with a 5.2x beam expansion from a 1 mm diameter red laser source. This image
# was taken at 30 frames per second and the interpixel distance of the camera
# is 42 nm per pixel at 22 C. As we discussed in class, we know that the
# stiffness (spring constant) of the trap is related to the mean squared
# displacement of the bead by
#
# k_trap = kBT / <(x - x_avg)^2>
#
# where <(x - x_avg)^2> is the mean squared displacement of the bead, kB is the
# Boltzmann constant, and T is the temperature of the system. Our goal is to
# determine the mean squared displacement of the bead in the trap. To do so,
# we will use some image processing techniques to segment the bead and identify
# the centroid. You should note that if we were calibrating this optical trap
# for "real-life" experiments, we would use more sophisticated techniques to
# calculate the trap to a sufficient precision.
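# As an editorial worked example (illustrative numbers): with kBT ~ 4.1E-3 pN * micron
# at room temperature, a bead whose mean squared displacement along x is
# 1E-4 micron^2 (an rms excursion of roughly 10 nm) implies a trap stiffness of
# k_x = 4.1E-3 / 1E-4 = 41 pN per micron.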
# To begin, let's load up the time series of our bead and look at the first
# image.
bead_ims = skimage.io.imread('data/optical_tweezer/trapped_bead_5.2x_4_MMStack_Pos0.ome.tif')
plt.figure()
plt.imshow(bead_ims[0], cmap=plt.cm.Greys_r)
plt.show()
# We see that the bead is dark on a light background with some fringing on
# the side. We took these images such that the bead was dark to simplify our
# segmentation analysis.
# Because these images are relatively clean, we can imagine using a threshold
# to determine what is bead and what is not. However, there is a lot of noise
# in the background of this image. Before we identify a threshold, let's try
# blurring the image with a gaussian blur to smooth it out. We'll then look
# at the histogram.
im_blur = skimage.filters.gaussian(bead_ims[0], sigma=1)
plt.figure()
plt.imshow(im_blur, cmap=plt.cm.Greys_r)
plt.show()
# Now, let's look at the image histogram to choose a threshold.
plt.figure()
plt.hist(im_blur.flatten(), bins=1000)
plt.xlabel('pixel value')
plt.ylabel('count')
plt.show()
# It seems pretty clear which pixels correspond to our bead. Let's impose a
# threshold value of 0.15 and see how well it works.
threshold = 0.15
im_thresh = im_blur < threshold
plt.figure()
plt.imshow(im_thresh, cmap=plt.cm.Greys_r)
plt.show()
# That seems to do a pretty good job! However, there is an object that is touching the border of the image. Since we only want to get the bead segmented, we'll clear the segmentation of anything that is touching the border.
im_border = skimage.segmentation.clear_border(im_thresh)
# Now, we want to find the position of the middle of the bead. While it would
# be best to find the center to sub-pixel accuracy by fitting a two-dimensional
# gaussian, we'll find it by extracting the centroid from the regionprops
# measurement. We'll first label the image and then extract the properties.
im_label = skimage.measure.label(im_border)
props = skimage.measure.regionprops(im_label)
centroid_x, centroid_y = props[0].centroid
# Now, let's plot the position of the centroid on the image of our bead to see
# how well it worked. Remember that images are plotted y vs x, so we will need
# to plot the centroid_y first.
plt.figure()
plt.imshow(im_blur)
plt.plot(centroid_y, centroid_x, 'o')
plt.show()
# That's pretty good! Now, to determine the mean squared displacement, we
# will want to find the position of the bead at each frame. To do this, we'll
# loop through all of the images and perform the exact same operations.
centroid_x, centroid_y = [], [] # Empty storage lists.
for i in range(len(bead_ims)):
# Blur the image.
im_blur = skimage.filters.gaussian(bead_ims[i], sigma=1)
# Segment the image.
im_thresh = im_blur < threshold
# Clear the border.
im_border = skimage.segmentation.clear_border(im_thresh)
im_large = skimage.morphology.remove_small_objects(im_border)
# Label the image and extract the centroid.
im_lab = skimage.measure.label(im_large)
props = skimage.measure.regionprops(im_lab, intensity_image=np.invert(bead_ims[i]))
x, y = props[0].weighted_centroid
# Store the x and y centroid positions in the storage list.
centroid_x.append(x)
centroid_y.append(y)
# Now, let's generate some plots to see if our analysis makes sense. We'll
# plot the centroid x vs centroid y position to make sure that our bead seems
# to be diffusing in the expected manner. We'll also plot the x and y positions
# as a function of time to see if there are any bumps in the table or if our
# segmentation went awry.
plt.figure()
plt.plot(centroid_x, centroid_y, '-')
plt.xlabel('x position (pixels)')
plt.ylabel('y position (pixels)')
# Now plot them as a function of time.
time_vec = np.arange(0, len(bead_ims), 1)  # frame number; divide by 30 fps to convert to seconds
plt.figure()
plt.plot(time_vec, centroid_x, '-')
plt.xlabel('frame number')
plt.ylabel('x position (pixels)')
plt.figure()
plt.plot(time_vec, centroid_y, '-')
plt.xlabel('frame number')
plt.ylabel('y position (pixels)')
plt.show()
# It looks like something gets bumped around frame 24 and again around frame
# 120, so we'll restrict our analysis to the frames in between. Within that
# window the bead seems to be diffusing as expected. Now let's calculate the
# mean squared displacement and compute the trap stiffness.
ip_dist = 0.042 # Physical distance in units of microns per pixel
centroid_x_micron = np.array(centroid_x) * ip_dist
centroid_y_micron = np.array(centroid_y) * ip_dist
# Compute the means and msd.
mean_x = np.mean(centroid_x_micron[24:120])
mean_y = np.mean(centroid_y_micron[24:120])
msd_x = np.mean((centroid_x_micron[24:120] - mean_x)**2)
msd_y = np.mean((centroid_y_micron[24:120] - mean_y)**2)
# Compute the trap stiffness.
kT = 4.1E-3 # Boltzmann constant times temperature, in units of pN * micron
k_x = kT / msd_x
k_y = kT / msd_y
print('Trap stiffness in x dimension is ' + str(k_x) + ' pN/micron')
print('Trap stiffness in y dimension is ' + str(k_y) + ' pN/micron')
# Wow! That's a stiff trap. Note that this is an approximation of the trap
# stiffness. To precisely measure it, we should determine the centroid of the bead
# to sub-pixel accuracy.
| mit |
yufengg/tensorflow | tensorflow/examples/learn/wide_n_deep_tutorial.py | 12 | 7989 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
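# Editorial note: in build_estimator() below, the "wide" model feeds
# base_columns + crossed_columns to a LinearClassifier, the "deep" model feeds
# deep_columns to a DNNClassifier, and the combined model pairs crossed_columns
# (linear side) with deep_columns (DNN side).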
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s"% test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
devanshdalal/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 112 | 3203 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
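# Illustrative behaviour of _parse_version, derived from the implementation above:
#   _parse_version('1.6.2')              -> (1, 6, 2)
#   _parse_version('0.14.0.dev-1ea1592') -> (0, 14, 0, 'dev-1ea1592')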
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
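# Quick sanity values for the (possibly backported) logistic sigmoid:
#   expit(0.0)   -> 0.5
#   expit(1000.) -> 1.0 without overflowing, the stability property tested above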
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
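# Example, mirroring the docstring comment above:
#   list(combinations_with_replacement('ABC', 2))
#   -> [('A', 'A'), ('A', 'B'), ('A', 'C'), ('B', 'B'), ('B', 'C'), ('C', 'C')]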
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
astorfi/TensorFlow-World | codes/3-neural_networks/undercomplete-autoencoder/code/autoencoder.py | 1 | 3507 | # An undercomplete autoencoder on MNIST dataset
from __future__ import division, print_function, absolute_import
import tensorflow.contrib.layers as lays
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform
from tensorflow.examples.tutorials.mnist import input_data
batch_size = 500 # Number of samples in each batch
epoch_num = 5 # Number of epochs to train the network
lr = 0.001 # Learning rate
def resize_batch(imgs):
# A function to resize a batch of MNIST images to (32, 32)
# Args:
# imgs: a numpy array of size [batch_size, 28 X 28].
# Returns:
# a numpy array of size [batch_size, 32, 32].
imgs = imgs.reshape((-1, 28, 28, 1))
resized_imgs = np.zeros((imgs.shape[0], 32, 32, 1))
for i in range(imgs.shape[0]):
resized_imgs[i, ..., 0] = transform.resize(imgs[i, ..., 0], (32, 32))
return resized_imgs
def autoencoder(inputs):
# encoder
# 32 x 32 x 1 -> 16 x 16 x 32
# 16 x 16 x 32 -> 8 x 8 x 16
# 8 x 8 x 16 -> 2 x 2 x 8
net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
# decoder
# 2 x 2 x 8 -> 8 x 8 x 16
# 8 x 8 x 16 -> 16 x 16 x 32
# 16 x 16 x 32 -> 32 x 32 x 1
net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
return net
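# Optional shape check, kept as a comment so the script's behaviour is unchanged
# (the placeholder name `dummy` is illustrative):
#   dummy = tf.placeholder(tf.float32, (None, 32, 32, 1))
#   print(autoencoder(dummy).get_shape())   # expected: (?, 32, 32, 1)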
# read MNIST dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# calculate the number of batches per epoch
batch_per_ep = mnist.train.num_examples // batch_size
ae_inputs = tf.placeholder(tf.float32, (None, 32, 32, 1)) # input to the network (MNIST images)
ae_outputs = autoencoder(ae_inputs) # create the Autoencoder network
# calculate the loss and optimize the network
loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs))  # calculate the mean squared error loss
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
# initialize the network
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for ep in range(epoch_num): # epochs loop
for batch_n in range(batch_per_ep): # batches loop
batch_img, batch_label = mnist.train.next_batch(batch_size) # read a batch
            batch_img = batch_img.reshape((-1, 28, 28, 1))              # reshape each sample to a (28, 28, 1) image
batch_img = resize_batch(batch_img) # reshape the images to (32, 32)
_, c = sess.run([train_op, loss], feed_dict={ae_inputs: batch_img})
print('Epoch: {} - cost= {:.5f}'.format((ep + 1), c))
# test the trained network
batch_img, batch_label = mnist.test.next_batch(50)
batch_img = resize_batch(batch_img)
recon_img = sess.run([ae_outputs], feed_dict={ae_inputs: batch_img})[0]
# plot the reconstructed images and their ground truths (inputs)
plt.figure(1)
plt.title('Reconstructed Images')
for i in range(50):
plt.subplot(5, 10, i+1)
plt.imshow(recon_img[i, ..., 0], cmap='gray')
plt.figure(2)
plt.title('Input Images')
for i in range(50):
plt.subplot(5, 10, i+1)
plt.imshow(batch_img[i, ..., 0], cmap='gray')
plt.show()
| mit |
WMD-group/MacroDensity | examples/FieldAtPoint.py | 1 | 4913 | #! /usr/bin/env python
# FieldAtPoint.py - try to calculate the electric field (grad of potential) at an arbitrary point
# Forked from PlaneField.py - JMF 2016-01-25
import macrodensity as md
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors,cm #colour maps; so I can specify cube helix
import sys #for argv
## Input section (define the plane with 3 points, fractional coordinates)
a_point = [0, 0, 0]
b_point = [1, 0, 1]
c_point = [0, 1, 0]
#LOCPOT.CsPbI3_cubic LOCPOT.CsPbI3_distorted LOCPOT.MAPI_pseudocubic
#input_file = 'LOCPOT.CsPbI3_distorted'
input_file = sys.argv[1]
print("Input file ",input_file)
#------------------------------------------------------------------
# Get the potential
# This section should not be altered
#------------------------------------------------------------------
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(input_file)
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
## Get the gradients (Field), if required.
## Comment out if not required, due to computational expense.
print("Calculating gradients (Electric field, E=-Grad.V )...")
grad_x,grad_y,grad_z = np.gradient(grid_pot[:,:,:],resolution_x,resolution_y,resolution_z)
#------------------------------------------------------------------
##------------------------------------------------------------------
## Get the equation for the plane
## This is the section for plotting on a user defined plane;
## uncomment commands if this is the option that you want.
##------------------------------------------------------------------
## Convert the fractional points to grid points on the density surface
a = md.numbers_2_grid(a_point,NGX,NGY,NGZ)
b = md.numbers_2_grid(b_point,NGX,NGY,NGZ)
c = md.numbers_2_grid(c_point,NGX,NGY,NGZ)
plane_coeff = md.points_2_plane(a,b,c)
## Calculate magnitude of gradient.
# Should be able to use numpy.linalg.norm for this, but the Python array indices are causing me grief
X2 = np.multiply(grad_x,grad_x)
Y2 = np.multiply(grad_y,grad_y)
Z2 = np.multiply(grad_z,grad_z)
grad_mag = np.sqrt(X2 + Y2 + Z2)  # note: np.add(X2, Y2, Z2) would treat Z2 as the output array and drop the Z component
# This was my non-working attempt to use the built-in function:
#grad_mag=np.linalg.norm( [grad_y,grad_y,grad_z], axis=3)
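# A vectorised alternative that should work (assumes numpy >= 1.10 for np.stack):
#   grad_mag = np.linalg.norm(np.stack([grad_x, grad_y, grad_z]), axis=0)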
## This function in Macrodensity averages Efield ACROSS Z for Slab calculations
#xx,yy,grd = pot.create_plotting_mesh(NGX,NGY,NGZ,plane_coeff,grad_mag) #AVG over full volume
# Here we construct the same xx,yy,grd variables with a SLICE, forming a plane in XY at particular ZSLICE
xx, yy = np.mgrid[0:NGX,0:NGY]
ZSLICE = NGZ // 2  # integer index; chooses where along the Z axis the XY slice is cut
# Slice of magnitude of electric field, for contour plotting
grd=grad_mag[:,:,ZSLICE]
# Slices of x and y components for arrow plotting
grad_x_slice=grad_x[:,:,ZSLICE]
grad_y_slice=grad_y[:,:,ZSLICE]
# OK, that's all our data
# This code tiles the data to (2,2) to re-expand unit cell to a 2x2 supercell in XY
xx,yy=np.mgrid[0:2*NGX,0:2*NGY]
grd=np.tile(grd, (2,2))
grad_x_slice=np.tile(grad_x_slice, (2,2))
grad_y_slice=np.tile(grad_y_slice, (2,2))
# End of tiling code
## Contours of the above sliced data
plt.contour(xx,yy,grd,6,cmap=cm.cubehelix)
# Also generate a set of Efield arrows ('quiver') for this data.
# Specifying the drawing parameters is quite frustrating - they are very brittle + poorly documented.
plt.quiver(xx,yy, grad_x_slice, grad_y_slice,
color='grey',
units='dots', width=1, headwidth=3, headlength=4
) #,
# units='xy', scale=10., zorder=3, color='blue',
# width=0.007, headwidth=3., headlength=4.)
plt.axis('equal')  # force a square aspect ratio; this assumes X and Y are equal
plt.show()
##------------------------------------------------------------------
##------------------------------------------------------------------
# CsPbI3 - distorted
PB_X = int(0.469972*NGX)   # cast to int: these are used directly as grid indices below
PB_Y = int(0.530081*NGY)
PB_Z = int(0.468559*NGZ)
# CsPbI3 - perfect cubic
#PB_X=0.5*NGX
#PB_Y=0.5*NGY
#PB_Z=0.5*NGZ
# MAPBI3 - pseudo cubic distorted
#PB_X=0.476171*NGX
#PB_Y=0.500031*NGY
#PB_Z=0.475647*NGZ
# Read out massive grad table, in {x,y,z} components
print(grad_x[PB_X][PB_Y][PB_Z],grad_y[PB_X][PB_Y][PB_Z],grad_z[PB_X][PB_Y][PB_Z])
# Norm of electric field at this point
print(np.linalg.norm([ grad_x[PB_X][PB_Y][PB_Z],grad_y[PB_X][PB_Y][PB_Z],grad_z[PB_X][PB_Y][PB_Z] ]))
# OK, let's try this with a Spectral method (FFT)
# JMF - Not currently working; unsure of data formats, need worked example
from scipy import fftpack
V_FFT=fftpack.fftn(grid_pot[:,:,:])
V_deriv=fftpack.diff(grid_pot[:,:,:]) #V_FFT,order=1)
# Standard catch all to drop into ipython at end of script for variable inspection etc.
from IPython import embed; embed() # End on an interactive ipython console to inspect variables etc.
| mit |
depet/scikit-learn | sklearn/datasets/samples_generator.py | 2 | 50824 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import linalg
from ..preprocessing import LabelBinarizer
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined in order to add covariance. The clusters
are then placed on the vertices of the hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=2)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float or None, optional (default=0.0)
Shift all features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float or None, optional (default=1.0)
Multiply all features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
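    Examples
    --------
    A minimal shape check; the parameter values are arbitrary and the labels
    themselves depend on ``random_state``:
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100,)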
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope
C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))
if not hypercube:
for k in range(n_clusters):
C[k, :] *= generator.rand()
for f in range(n_informative):
C[:, f] *= generator.rand()
generator.shuffle(C)
# Loop over all clusters
pos = 0
pos_end = 0
for k in range(n_clusters):
# Number of samples in cluster k
n_samples_k = n_samples_per_cluster[k]
# Define the range of samples
pos = pos_end
pos_end = pos + n_samples_k
# Assign labels
y[pos:pos_end] = k % n_classes
# Draw features at random
X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
n_informative)
# Multiply by a random matrix to create co-variance of the features
A = 2 * generator.rand(n_informative, n_informative) - 1
X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
A)
        # Shift the cluster to a vertex
X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)
# Randomly flip labels
if flip_y >= 0.0:
for i in range(n_samples):
if generator.rand() < flip_y:
y[i] = generator.randint(n_classes)
# Randomly shift and scale
constant_shift = shift is not None
constant_scale = scale is not None
for f in range(n_features):
if not constant_shift:
shift = (2 * generator.rand() - 1) * class_sep
if not constant_scale:
scale = 1 + 100 * generator.rand()
X[:, f] += shift
X[:, f] *= scale
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
return_indicator=False, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
            if c not in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
if return_indicator:
lb = LabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
return np.array(X, dtype=np.float64), Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
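    Examples
    --------
    A shape-only sketch with an arbitrary sample count:
    >>> X, y = make_hastie_10_2(n_samples=24, random_state=0)
    >>> X.shape
    (24, 10)
    >>> y.shape
    (24,)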
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
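    Examples
    --------
    A shape-only sketch; the chosen sizes are arbitrary:
    >>> X, y = make_regression(n_samples=10, n_features=5, random_state=0)
    >>> X.shape
    (10, 5)
    >>> y.shape
    (10,)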
"""
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
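    Examples
    --------
    A shape-only sketch; label values depend on ``shuffle`` and ``random_state``:
    >>> X, y = make_circles(n_samples=100, random_state=0)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)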
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples / 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples / 2), np.ones(n_samples / 2)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y.astype(np.int)
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
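    Examples
    --------
    A shape-only sketch; label values depend on ``shuffle`` and ``random_state``:
    >>> X, y = make_moons(n_samples=100, random_state=0)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)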
"""
n_samples_out = n_samples / 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in), np.ones(n_samples_out)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y.astype(np.int)
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
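    Examples
    --------
    A shape-only sketch with the default sizes:
    >>> X, y = make_friedman1(n_samples=100, n_features=10, random_state=0)
    >>> X.shape
    (100, 10)
    >>> y.shape
    (100,)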
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
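    Examples
    --------
    A shape-only sketch; the chosen sizes are arbitrary:
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> X.shape
    (50, 25)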
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al [1]. as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
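    Examples
    --------
    A minimal sketch; symmetry follows from the construction below:
    >>> import numpy as np
    >>> X = make_spd_matrix(n_dim=3, random_state=0)
    >>> X.shape
    (3, 3)
    >>> np.allclose(X, X.T)
    True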
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
        The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
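    Examples
    --------
    A shape-only sketch with the default sample count:
    >>> X, t = make_swiss_roll(n_samples=100, random_state=0)
    >>> X.shape
    (100, 3)
    >>> t.shape
    (100,)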
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
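    Examples
    --------
    A shape-only sketch with the default two features and three classes:
    >>> X, y = make_gaussian_quantiles(n_samples=90, random_state=0)
    >>> X.shape
    (90, 2)
    >>> y.shape
    (90,)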
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    # build lists (not generators) so np.vstack also works on recent NumPy
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
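# Illustrative usage sketch of make_biclusters (comment only); shape,
# n_clusters and noise are arbitrary example values:
#
#     data, rows, cols = make_biclusters(shape=(300, 300), n_clusters=5,
#                                        noise=5, random_state=0)
#     # data.shape == (300, 300); rows.shape == (5, 300); cols.shape == (5, 300)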
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
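# Illustrative usage sketch of make_checkerboard (comment only); note that
# n_clusters may also be a (n_row_clusters, n_col_clusters) tuple:
#
#     data, rows, cols = make_checkerboard(shape=(300, 300), n_clusters=(4, 3),
#                                          noise=10, random_state=0)
#     # data.shape == (300, 300); rows.shape == (12, 300); cols.shape == (12, 300)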
| bsd-3-clause |
DouglasLeeTucker/DECam_PGCM | bin/rawdata_clean_relevant_apass2mass_data.py | 1 | 3725 | #!/usr/bin/env python
"""
rawdata_clean_relevant_apass2mass_data.py
Example:
rawdata_clean_relevant_apass2mass_data.py --help
rawdata_clean_relevant_apass2mass_data.py --inputFile apass2mass_new_rawdata_rawdata.csv --outputFile apass2mass_new_y2a1_rawdata.u.csv.tmp --verbose 2
"""
##################################
def main():
import argparse
import time
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--inputFile', help='name of the input CSV file', default='input.csv')
parser.add_argument('--outputFile', help='name of the output CSV file', default='output.csv')
parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int)
args = parser.parse_args()
if args.verbose > 0: print args
status = clean_relevant_apass2mass_data(args)
return status
##################################
# clean_relevant_apass2mass_data
#
def clean_relevant_apass2mass_data(args):
import numpy as np
import os
import sys
import datetime
import fitsio
import pandas as pd
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'clean_relevant_apass2mass_data'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
inputFile = args.inputFile
outputFile = args.outputFile
# Read selected columns from inputFile...
columns = ['RA_WRAP','RAJ2000_APASS','DEJ2000_APASS','GMAG_APASS','RMAG_APASS','IMAG_APASS','JMAG_2MASS','HMAG_2MASS','KMAG_2MASS','g_des','r_des','i_des','z_des','Y_des']
print datetime.datetime.now()
print """Reading in selected columns from %s...""" % (inputFile)
df = pd.read_csv(inputFile, usecols=columns)
print datetime.datetime.now()
# Rename a couple columns...
df.rename(columns={'RAJ2000_APASS':'RA', 'DEJ2000_APASS':'DEC'}, inplace=True)
# Add APASS "g-r" column...
df.loc[:,'gr_apass'] = df.loc[:,'GMAG_APASS'] - df.loc[:,'RMAG_APASS']
# Transformation equation (20161123, from run of y2a1_new_u_from_apass2massGR.py):
# u_des = g_apass + 1.699*(g_apass-r_apass)**2 - 0.1106*(g_apass-r_apass) + 0.6307,
# which is appropriate for "0.2 <= (g-r)_apass <= 0.8".
    # Use a signal value of -9999 for stars with colors outside this range...
df.loc[:,'UMAG_DES'] = -9999.
mask1 = ( (df.GMAG_APASS > 0.0) & (df.RMAG_APASS > 0.0) )
mask2 = ( (df.gr_apass >= 0.2) & (df.gr_apass <= 0.8) )
mask_all = ( mask1 & mask2 )
df.loc[mask_all,'UMAG_DES'] = df.loc[mask_all,'GMAG_APASS'] + 1.699*df.loc[mask_all,'gr_apass']*df.loc[mask_all,'gr_apass'] - 0.1106*df.loc[mask_all,'gr_apass'] + 0.6307
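    # Worked example of the transformation above (numbers are illustrative,
    # not from the catalog): for g_apass = 14.000 and r_apass = 13.600,
    # (g-r) = 0.400, so
    #   u_des = 14.000 + 1.699*0.400**2 - 0.1106*0.400 + 0.6307
    #         = 14.000 + 0.27184 - 0.04424 + 0.6307 = 14.8583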
# For the time being (until we've updated the transformation equations for these
# other filters), we'll keep the current values of GMAG_DES, RMAG_DES, IMAG_DES,
# ZMAG_DES, and YMAG_DES
df.loc[:,'GMAG_DES'] = df.loc[:,'g_des']
df.loc[:,'RMAG_DES'] = df.loc[:,'r_des']
df.loc[:,'IMAG_DES'] = df.loc[:,'i_des']
df.loc[:,'ZMAG_DES'] = df.loc[:,'z_des']
df.loc[:,'YMAG_DES'] = df.loc[:,'Y_des']
# Output results...
outcolumns = ['RA_WRAP','RA','DEC','GMAG_APASS','RMAG_APASS','IMAG_APASS','JMAG_2MASS','HMAG_2MASS','KMAG_2MASS','UMAG_DES','GMAG_DES','RMAG_DES','IMAG_DES','ZMAG_DES','YMAG_DES']
df.to_csv(outputFile, columns=outcolumns, index=False, float_format='%.6f')
return 0
##################################
if __name__ == "__main__":
main()
##################################
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/user_interfaces/pylab_with_gtk.py | 3 | 1419 | """
An example of how to use pylab to manage your figure windows, but
modify the GUI by accessing the underlying gtk widgets
"""
from __future__ import print_function
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
ax = plt.subplot(111)
plt.plot([1,2,3], 'ro-', label='easy as 1 2 3')
plt.plot([1,4,9], 'gs--', label='easy as 1 2 3 squared')
plt.legend()
manager = plt.get_current_fig_manager()
# you can also access the window or vbox attributes this way
toolbar = manager.toolbar
# now let's add a button to the toolbar
import gtk
next = 8; #where to insert this in the mpl toolbar
button = gtk.Button('Click me')
button.show()
def clicked(button):
print('hi mom')
button.connect('clicked', clicked)
toolitem = gtk.ToolItem()
toolitem.show()
toolitem.set_tooltip(
toolbar.tooltips,
'Click me for fun and profit')
toolitem.add(button)
toolbar.insert(toolitem, next); next +=1
# now let's add a widget to the vbox
label = gtk.Label()
label.set_markup('Drag mouse over axes for position')
label.show()
vbox = manager.vbox
vbox.pack_start(label, False, False)
vbox.reorder_child(manager.toolbar, -1)
def update(event):
if event.xdata is None:
label.set_markup('Drag mouse over axes for position')
else:
label.set_markup('<span color="#ef0000">x,y=(%f, %f)</span>'%(event.xdata, event.ydata))
plt.connect('motion_notify_event', update)
plt.show()
| mit |
rishikksh20/scikit-learn | sklearn/neighbors/regression.py | 26 | 10999 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
automl/ChaLearn_Automatic_Machine_Learning_Challenge_2015 | 003_grigoris.py | 1 | 6183 | import argparse
import os
from joblib import Parallel, delayed
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.competition_data_manager
from autosklearn.pipeline.classification import SimpleClassificationPipeline
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
input = args.input
dataset = 'grigoris'
output = args.output
path = os.path.join(input, dataset)
D = autosklearn.data.competition_data_manager.CompetitionDataManager(path)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Replace the following array by a new ensemble
choices = \
[(0.720000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 0.0665747065156058,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 0.002362381246384099,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0972585384393519,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'normalize'})),
(0.100000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 7.705276414124367,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 0.028951969755081776,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'normalize'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 1.0,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 0.0001,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.0033856971814438443,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'normalize'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 0.2598769185905466,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 0.001007160236770467,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.019059927375795167,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'normalize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 0.6849477125990308,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 1.2676147487949745e-05,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.003803817610653382,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'normalize'})),
]
targets = []
predictions = []
predictions_valid = []
predictions_test = []
def fit_and_predict(estimator, weight, X, y):
try:
estimator.fit(X.copy(), y.copy())
pv = estimator.predict_proba(X_valid.copy()) * weight
pt = estimator.predict_proba(X_test.copy()) * weight
except Exception as e:
print(e)
print(estimator.configuration)
pv = None
pt = None
return pv, pt
# Make predictions and weight them
all_predictions = Parallel(n_jobs=-1)(delayed(fit_and_predict) \
(estimator, weight, X, y) for
weight, estimator in choices)
for pv, pt in all_predictions:
predictions_valid.append(pv)
predictions_test.append(pt)
# Output the predictions
for name, predictions in [('valid', predictions_valid),
('test', predictions_test)]:
predictions = np.array(predictions)
predictions = np.sum(predictions, axis=0).astype(np.float32)
filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
np.savetxt(filepath, predictions, delimiter=' ', fmt='%.4e')
| bsd-2-clause |
lbishal/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
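    # A minimal sketch of one possible completion of the TASKs above, kept
    # here so the script runs end-to-end; the concrete choices (min_df=3,
    # max_df=0.95, LinearSVC with C=1000, unigrams vs. bigrams) are
    # illustrative, not the only reasonable ones.
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated mean/std test score for each explored parameter setting
    for i, params in enumerate(grid_search.cv_results_['params']):
        print("%s: mean=%0.3f, std=%0.3f"
              % (params,
                 grid_search.cv_results_['mean_test_score'][i],
                 grid_search.cv_results_['std_test_score'][i]))
    y_predicted = grid_search.predict(docs_test)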
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
gclenaghan/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
grhawk/ASE | tools/ase/calculators/ase_qmmm_manyqm.py | 4 | 60409 | """QM/MM interface with QM=FHI-aims, MM=gromacs
QM could be something else, but you need to read in qm-atom charges
from the qm program (in method 'get_qm_charges')
One can have many QM regions, each with a different calculator.
There can be only one MM calculator, which is calculating the whole
system.
Non-bonded interactions:
------------------------
Generally:
Within the same QM-QM:
by qm calculator
MM-MM:
by MM calculator
QM-MM:
by MM using MM vdw parameters and QM charges.
Between different QM regions:
by MM using QM and MM charges and MM-vdw parameters
The Hirschfeld charges (or other atomic charges)
on QM atoms are calculated by QM in a H terminated cluster in vacuum.
The charge of QM atom next to MM atom (edge-QM-atom)
and its H neighbors are set as in the classical force field.
The extra(missing) charge results from:
1) linkH atoms
2) The edge-QM atoms, and their qm-H neighbors,
have their original MM charges.
3) and the fact that the charge of the QM fraction
is not usually an integer when using the original MM charges.
It is added equally to all QM atoms
(not being linkH and not being edge-QM-atom or its H neighbor)
so that the total charge of the MM-fragment involving QM atoms
will be the same as in the original MM-description.
Vdw interactions are calculated by MM-gromacs for MM-MM and MM-QM interactions.
The QM-QM vdw interactions could be done by FHI-aims if desired
(by modifying the input for QM-FHI-aims accordingly).
Bonded interactions::
E=
E_qm(QM-H) ; qm energy of H terminated QM cluster(s)
+ E_mm(ALL ATOMS) ; mm energy of all atoms,
; except for terms in which all MM-interacting atoms are
; in the same QM region
Forces do not act on link atoms but they are positioned by scaling.
Forces on link atoms are given to their QM and MM neighbors by chain rule.
(see J. Chem. Theory Comput. 2011, 7, 761-777).
The optimal edge-qm-atom-linkH bond lengths are calculated
by QM in 'get_eq_qm_atom_link_h_distances'
or read from a file.
Questions & Comments [email protected]
I'm especially interested in cases when we need two or more
QM regions. For instance two redox centers in a protein,
cathode and anode of a fuel cell ... you name it!
Some things to improve:
1) Water topology issue (at the moment water cannot be in QM),
Its topology should be put into the main
topology file, not in a separate file.
2) point charges and periodicity (if desired) to the QM calculation
(now in vacuum)
3) Eichinger type of link atom treatment with fitted force constants for
   linkH-QMedge (bond stretch)
linkH-QMedge-QMnextTOedge (angle terms)
4) file io using unformatted formats (.trr) instead of g96
This is not easily possible without loading extra stuff from
ftp://ftp.gromacs.org/pub/contrib/xd...e-1.1.1.tar.gz.
5) Utilize gromacs-python wrapper: (just found this today 31.12.2012...)
http://orbeckst.github.com/GromacsWrapper/index.html#
"""
import sys
import numpy as np
def get_neighbor_list(system):
"""
Makes a neighbor list of a system (ase Atoms).
See
    https://wiki.fysik.dtu.dk/ase/ase/calculators/calculators.html#module-calculators
"""
from ase.calculators.neighborlist import NeighborList
from ase.data import covalent_radii
import os
import pickle
NEIGHBOR_FILE = 'neighbor_list_for_ase_qmmm.txt'
if os.path.exists(NEIGHBOR_FILE):
print('Reading qm/mm neighbor list from file:')
print('neighbor_list_for_ase_qmmm.txt')
myfile = open(NEIGHBOR_FILE, 'r')
neighbor_list = pickle.load(myfile)
else:
cut = [covalent_radii[atom.number] for atom in system]
skin = [0.2 for atom in system]
neighbor_list = NeighborList(cut, skin, \
self_interaction=False, bothways=True)
neighbor_list.update(system)
file = open(NEIGHBOR_FILE, 'w')
pickle.dump(neighbor_list, file)
file.close()
return neighbor_list
def get_qm_atoms(indexfilename='index.ndx'):
"""
Read the indexes of all QM atoms (there may be many QM regions)
"""
infile = open(indexfilename,'r')
lines = infile.readlines()
infile.close()
qms = []
for iline, line in enumerate(lines):
if (('[ QM' in line) or ('[ qm' in line) or ('[ Qm' in line)) \
or (('[QM' in line) or ('[qm' in line) or ('[Qm' in line)):
qm = []
for checkline in lines[iline+1:]:
if ('[') in checkline:
break
else:
qm = qm + [int(float(s)-1.0) for s in \
checkline.split() if s.isdigit()]
qm = list(set(qm))
qms.append(qm)
return qms
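# A minimal index.ndx fragment of the kind get_qm_atoms() expects is sketched
# below (group names only need to contain 'QM'/'Qm'/'qm'; atom numbers are
# 1-based in the file and are shifted to 0-based above). The numbers are
# purely illustrative:
#
#     [ QM1 ]
#     1 2 3 4 5 6
#     [ QM2 ]
#     25 26 27 28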
class LinkAtom:
"""
Class for information about a single link-atom
(it terminates a QM cluster)
qm_region_index and link_atom_index refer to the following indexing system:
[[QM0 link atoms indexes from 0],[QM1 link atoms indexes from 0],...]
So above the second link atom in second qm region would have
qm_region_index=1, link_atom_index=1
link_atom_index_in_qm tells which index in qm system the link atom has
for instance
qm_region_index=1, link_atom_index_in_qm=20
means that link atom is 21'st atom in the second qm system
"""
def __init__(self, atom, qm_region_index, link_atom_index):
""" set initial values to a link atom object """
self.atom = atom
self.qm_region_index = qm_region_index
self.link_atom_index = link_atom_index
self.link_atom_index_in_qm = None
self.qm_neighbor = None
self.mm_neighbor = None
self.qm2_neighbors = []
self.qm3_neighbors = []
self.mm2_neighbors = []
self.set_qm2_neighbors = set([])
self.set_qm3_neighbors = set([])
self.set_mm2_neighbors = set([])
self.force_constant = 0.0
self.equilibrium_distance_xh = 0.0
self.equilibrium_distance_xy = 0.0
def set_link_atom(self, atom):
""" set an ase-atom to be the link atom """
self.atom = atom
def set_link_atom_qm_region_index(self, qm_region_index):
""" set to which qm region the link atom belongs to """
self.qm_region_index = qm_region_index
def set_link_atom_index_in_qm(self, link_atom_index_in_qm):
""" set what is my link atom index in this qm region """
self.link_atom_index_in_qm = link_atom_index_in_qm
def set_link_atom_qm_neighbor(self, qm_neighbor):
""" set what index does my qm neighbor have"""
self.qm_neighbor = qm_neighbor
def set_link_atom_mm_neighbor(self, mm_neighbor):
""" set what index does my mm neighbor have"""
self.mm_neighbor = mm_neighbor
def set_link_atom_qm2_neighbors(self, qm2_neighbors):
""" set what index does my second qm neighbor have"""
self.qm2_neighbors = qm2_neighbors
def set_link_atom_qm3_neighbors(self, qm3_neighbors):
""" set what index does my third qm neighbor have"""
self.qm3_neighbors = qm3_neighbors
def set_link_atom_mm2_neighbors(self, mm2_neighbors):
""" set what index does my second mm neighbor have"""
self.mm2_neighbors = mm2_neighbors
def set_force_constant(self, force_constant):
""" set the force constant of bond edge-qm -- linkH (not used)"""
self.force_constant = force_constant
def set_equilibrium_distance_xh(self, equilibrium_distance_xh):
""" set the equilibrium edge-qm -- linkH distance """
self.equilibrium_distance_xh = equilibrium_distance_xh
def set_equilibrium_distance_xy(self, equilibrium_distance_xy):
"""set the equilibrium edge-qm --
edge-mm distance (by MM-force field)"""
self.equilibrium_distance_xy = equilibrium_distance_xy
def get_link_atom(self):
""" get an ase-atom to be the link atom """
return self.atom
def get_link_atom_qm_region_index(self):
""" get to which qm region the link atom belongs to """
return self.qm_region_index
def get_link_atom_index_in_qm(self):
""" get what is my link atom index in this qm region """
return self.link_atom_index_in_qm
def get_link_atom_qm_neighbor(self):
""" get what index does my qm neighbor have"""
return self.qm_neighbor
def get_link_atom_mm_neighbor(self):
""" get what index does my mm neighbor have"""
return self.mm_neighbor
def get_link_atom_qm2_neighbors(self):
""" get what index does my second qm neighbor have"""
return self.qm2_neighbors
def get_link_atom_qm3_neighbors(self):
""" get what index does my third qm neighbor have"""
return self.qm3_neighbors
def get_link_atom_mm2_neighbors(self):
""" get what index does my second mm neighbor have"""
return self.mm2_neighbors
def get_force_constant(self):
""" get the force constant of bond edge-qm -- linkH (not used)"""
return self.force_constant
def get_equilibrium_distance_xh(self):
""" get the equilibrium edge-qm -- linkH distance """
return self.equilibrium_distance_xh
def get_equilibrium_distance_xy(self):
"""get the equilibrium edge-qm --
edge-mm distance (by MM-force field)"""
return self.equilibrium_distance_xy
class AseQmmmManyqm:
""" This is a qm/mm interface with qm=FHI-aims, mm=gromacs.
We can have many QM regions, each with a different calculator.
There can be only one MM calculator, which is calculating the whole
system.
Numeration of atoms starts from 0. (in qms, mms)
In qm calculations link atom(s) come(s) last.
For any qm region, the optimal bond lengths for all edge_atom-link_atom
pairs are optimized by QM simultaneously at the beginning of
    the run when the flag link_info='byQM' is used (by the method
    get_eq_qm_atom_link_h_distances), or they are read from a file when
    link_info='byFILE'. The positions of the link atoms themselves are set
    at every step by scaling along the current QM-MM bonds (see
    get_link_atoms).
"""
def __init__(self, nqm_regions, \
qm_calculators, mm_calculator, \
link_info='byQM'):
""" Set initial values to each qm and mm calculator.
Additionally set information for the qm/mm interface.
The information about qm and mm indexes is read from
a file 'index.ndx'
Which can be generated with a gromacs tool 'make_ndx'
http://www.gromacs.org/Documentation/Gromacs_Utilities/make_ndx
Parameters
==========
nqm_regions: int
how many qm regions
qm_calculators: list members of a Class defining a Calculator
ase-qm calculator for each qm region
mm_calculator: a member of a Class defining a Calculator
ase-mm calculator for mm (the whole system)
link_info: str
can be either
'byQM': the edge_qm_atom-link_h_atom distances are calculated by QM
            'byFILE': the edge_qm_atom-link_h_atom distances are read from a file
"""
from ase.io import read, write
import os, glob
# clean
files = glob.glob('test-*')
for file in files:
try:
os.remove(file)
except OSError:
pass
self.atoms = None
self.positions = None
self.neighbor_list = None
self.link_atoms = []
self.energy = None
self.e_delta_stretch = None
self.nqm_regions = nqm_regions
self.qm_calculators = qm_calculators
self.mm_calculator = mm_calculator
self.qmatom_types = []
self.mmatom_types = []
        #set a unique name for each qm region
# (the output file of each qm calculation)
for i in range(len(self.qm_calculators)):
self.qm_calculators[i].set(output_template = 'aims'+str(i))
self.link_systems = None
self.equilibrium_distances_xy = []
self.equilibrium_distances_xh = []
self.force_constants = []
# get the sets of qm atoms
self.qms = get_qm_atoms()
self.set_qms = set(sum(self.qms, []))
print('qmsystem(s), indexing from 0:')
print('')
for index_out in self.qms:
index_str = ''
for index in index_out:
index_str += str(index) + ' '
print ('%s' % index_str)
print('')
if ( len(self.qms) != nqm_regions):
            print ('Number of sets of QM atoms does not match with nqm_regions')
print ('self.qms %s' % str(self.qms))
print ('nqm_regions %s' % str(nqm_regions))
sys.exit()
if ( len(self.qms) != len(qm_calculators)):
            print ('Number of sets of QM atoms does not match with')
print ('the number of QM calculators')
sys.exit()
#read the actual structure to define link atoms and their neighbors
system_tmp = mm_calculator.atoms
self.positions = system_tmp.get_positions()
#get neighbor lists
self.neighbor_list = get_neighbor_list(system_tmp)
#get the mm-atoms next to link atoms for all qm regions
(self.mms_edge, self.qms_edge, self.set_mms_edge, self.set_qms_edge) = \
self.get_edge_qm_and_mm_atoms(self.qms, system_tmp)
#get the mm atoms being second neighbors to any qm atom
(self.second_mms, self.set_second_mms) = \
self.get_next_neighbors(self.mms_edge, self.set_qms)
#get the qm atoms being second neighbors to link atom
(self.second_qms, self.set_second_qms) = \
self.get_next_neighbors(self.qms_edge, \
self.set_mms_edge)
#get the qm atoms being neighbors to link atom (edge-qm atoms)
# and their neighbors which have only single neighbor
# (for example edge-QM(C)-H or edge-QM(C)=O; for charge exclusion)
self.constant_charge_qms = \
self.get_constant_charge_qms\
(self.set_qms_edge, self.set_second_qms)
#get the qm atoms being third neighbors to link atom
(self.third_qms, self.set_third_qms) = \
self.get_next_neighbors\
(self.second_qms, self.set_qms_edge)
print('self.qms %s' % self.qms)
print('QM edge, MM edge %s' \
% str(self.qms_edge)+' '+ str(self.mms_edge))
print('MM second N of Link %s' % str(self.second_mms))
print('QM second N of Link %s' % str(self.second_qms))
print('QM third N of Link %s' % str(self.third_qms))
if link_info == 'byFILE':
self.read_eq_distances_from_file()
else:
#get QM-MM bond lengths
self.get_eq_distances_xy(\
topfilename=mm_calculator.topology_filename,\
force_field= mm_calculator.force_field)
#get QM-linkH distances by QM for all link atoms
self.get_eq_qm_atom_link_h_distances(system_tmp)
            # write current link-info data to file (it can be used later,
            # since the X-H bond constants are already calculated by QM).
# Also one can manually change the XY bond lengths
self.write_eq_distances_to_file(\
self.qms_edge)
#get target charge of each qm-region
self.classical_target_charge_sums = \
self.get_classical_target_charge_sums\
(self.mm_calculator.topology_filename, self.qms)
#get a list of link H atoms
self.link_atoms = self.get_link_atoms(\
self.qms_edge, self.mms_edge,\
self.force_constants,\
self.equilibrium_distances_xh, \
self.equilibrium_distances_xy)
self.qmsystems = self.define_QM_clusters_in_vacuum(system_tmp)
for iqm, qm in enumerate(self.qmsystems):
write('test-qm-'+str(iqm)+'.xyz', qm)
#attach calculators to qm regions
for iqm, qm in enumerate(self.qmsystems):
self.qmsystems[iqm].set_calculator(self.qm_calculators[iqm])
#attach calculators to the mm region (the whole system)
self.mm_system = system_tmp
self.mm_system.set_calculator(self.mm_calculator)
#initialize total energy and forces of qm regions
#and the mm energy
self.qm_energies = []
self.qm_forces = []
self.qm_charges = []
self.sum_qm_charge = []
for iqm, qm in enumerate(self.qmsystems):
self.qm_energies.append(0.0)
self.qm_forces.append(None)
self.qm_charges.append(None)
self.sum_qm_charge.append(None)
self.mm_energy = None
#set initial zero forces
self.forces = np.zeros((len(self.positions), 3))
self.charges = np.zeros((len(self.positions), 1))
try:
os.remove(self.mm_calculator.topology_filename+'.orig')
except:
pass
print('%s' % str(self.mm_calculator.topology_filename))
os.system('cp ' + self.mm_calculator.topology_filename + ' ' +\
self.mm_calculator.topology_filename + '.orig')
        #remove some classical bonded interactions in the topology file
        # this needs to be done only once, because the bond topology
# is unchanged during a QM/MM run
#(QM charges can be updated in the topology, however)
# the original topology is generated when calling Gromacs(
# in the main script setting up QM, MM and minimization
if (self.mm_calculator.name == 'Gromacs'):
self.kill_top_lines_containing_only_qm_atoms\
(self.mm_calculator.topology_filename, self.qms, \
self.mm_calculator.topology_filename)
else:
print('Only Gromacs MM-calculator implemented in ASE-QM/MM')
sys.exit()
#exclude qm-qm non-bonded interactions in MM-gromacs
self.add_exclusions()
#generate input file for gromacs run
self.mm_calculator.generate_gromacs_run_file()
######### end of Init #####################################
def get_forces(self, atoms):
"""get forces acting on all atoms except link atoms """
self.update(atoms)
return self.forces
def get_potential_energy(self, atoms):
""" get the total energy of the MM and QM system(s) """
self.update(atoms)
return self.energy
def update(self, atoms):
"""Updates and does a check to see if a calculation is required"""
if self.calculation_required(atoms):
# performs an update of the atoms and qm systems
self.atoms = atoms.copy()
self.positions = atoms.get_positions()
self.mm_system = atoms.copy()
#get the positions of link H atoms
self.link_atoms = self.get_link_atoms(\
self.qms_edge, self.mms_edge,\
self.force_constants,\
self.equilibrium_distances_xh, \
self.equilibrium_distances_xy)
            #get QM systems
self.qmsystems = self.define_QM_clusters_in_vacuum(\
self.atoms)
self.calculate(atoms)
def calculation_required(self, atoms):
"""Checks if a calculation is required"""
if ((self.positions is None) or
(self.atoms != atoms) or
(self.energy is None)):
return True
return False
def calculate_mm(self):
""" Calculating mm energies and forces """
import os
mm = self.atoms
mm.set_calculator(self.mm_calculator)
if (self.mm_calculator.name == 'Gromacs'):
try:
os.remove(self.mm_calculator.base_filename+'.log')
except:
pass
self.mm_calculator.update(mm)
self.mm_energy = 0
self.mm_energy += mm.get_potential_energy()
self.forces += mm.get_forces()
def calculate_qms(self):
""" QM calculations on all qm systems are carried out """
for iqm, qm in enumerate(self.qmsystems):
qm.set_calculator(self.qm_calculators[iqm])
self.qm_energies[iqm] = qm.get_potential_energy()
self.qm_forces[iqm] = np.zeros((len(qm), 3))
self.qm_forces[iqm] = qm.get_forces()
(self.sum_qm_charge[iqm], self.qm_charges[iqm]) = \
self.get_qm_charges(iqm,
number_of_link_atoms =\
len(self.qms_edge[iqm]))
if (len(self.qms[iqm]) != len(self.qm_charges[iqm])):
print('Problem in reading charges')
print('len(self.qms[iqm]) %s' % str(len(self.qms[iqm])))
print('len(self.qm_charges[iqm]) %s' \
% str(len(self.qm_charges[iqm])))
print('Check the output of QM program')
print('iqm, qm %s' % str(iqm)+ ' '+ str(qm))
print('self.qm_charges[iqm] %s' % str(self.qm_charges[iqm]))
sys.exit()
def calculate_single_qm(self, myqm, mycalculator):
""" Calculate the qm energy of a single qm region
(for X-H bond length calculations)
"""
myqm.set_calculator(mycalculator)
return myqm.get_potential_energy()
def run(self, atoms):
"""Runs QMs and MM"""
self.forces = np.zeros((len(atoms), 3))
self.calculate_qms()
# update QM charges to MM topology file
self.set_qm_charges_to_mm_topology()
        #generate gromacs run file (.tpr) based on new topology
self.mm_calculator.generate_gromacs_run_file()
self.calculate_mm()
def calculate(self, atoms):
"""gets all energies and forces (qm, mm, qm-mm and corrections)"""
self.run(atoms)
self.energy = sum(self.qm_energies)+self.mm_energy
#map the forces of QM systems to all atoms
#loop over qm regions
for qm, qm_force in zip(self.qms, self.qm_forces):
#loop over qm atoms in a qm region
#set forces to the all-atom set (the all atom set does not
# have link atoms)
for iqm_atom, qm_atom in enumerate(qm):
self.forces[qm_atom] = self.forces[qm_atom] + \
qm_force[iqm_atom]
self.get_link_atom_forces(action = 'QM')
def get_link_atoms(self, qm_links, mm_links, \
force_constants,\
equilibrium_distances_xh, equilibrium_distances_xy):
"""
QM atoms can be bonded to MM atoms. In this case one sets
an extra H atom (a link atom).
        The positions of all link H atoms in all qm regions are
        set along the QM-MM bond, with the bond length defined by:
J. Chem. Theory Comput 2011, 7, 761-777, Eq 1
r_XH = r_XY_current*(r_XH_from_qm_calculation /r_XY_from_forceField)
"""
import math
from ase import Atom
link_hs = []
for i_qm_region, (qm0, mm0) in enumerate (zip( qm_links, mm_links)):
for i_link_atom, (qmatom, mmatom) in enumerate (zip(qm0, mm0)):
dx = (self.positions[mmatom, 0] - self.positions[qmatom, 0])
dy = (self.positions[mmatom, 1] - self.positions[qmatom, 1])
dz = (self.positions[mmatom, 2] - self.positions[qmatom, 2])
d = math.sqrt(dx* dx+ dy* dy+ dz* dz)
unit_x = dx/ d
unit_y = dy/ d
unit_z = dz/ d
xh_bond_length = \
d*\
self.equilibrium_distances_xh[i_qm_region][i_link_atom]/\
self.equilibrium_distances_xy[i_qm_region][i_link_atom]
posh_x = self.positions[qmatom, 0] + unit_x* xh_bond_length
posh_y = self.positions[qmatom, 1] + unit_y* xh_bond_length
posh_z = self.positions[qmatom, 2] + unit_z* xh_bond_length
tmp_link_h = (Atom('H', position=(posh_x, posh_y, posh_z)))
link_h = LinkAtom(atom=tmp_link_h, \
qm_region_index = i_qm_region,\
link_atom_index = i_link_atom)
link_h.set_link_atom_qm_neighbor(qmatom)
link_h.set_link_atom_mm_neighbor(mmatom)
link_h.set_force_constant(\
force_constants[i_qm_region][i_link_atom])
link_h.set_equilibrium_distance_xh(equilibrium_distances_xh\
[i_qm_region][i_link_atom])
link_h.set_equilibrium_distance_xy(equilibrium_distances_xy\
[i_qm_region][i_link_atom])
link_hs.append(link_h)
return (link_hs)
def get_link_atom_forces(self, action):
""" Add forces due to link atom to QM atom
and to MM atom next to each link atom.
Top Curr Chem (2007) 268: 173-290
QM/MM Methods for Biological Systems
Hans Martin Senn and Walter Thiel
Eqs. 10(p192), 12(p193), 16a, 16b(p 194)
"""
for link_atom in self.link_atoms:
i_qm_atom = link_atom.qm_neighbor
i_mm_atom = link_atom.mm_neighbor
i_qm_region = link_atom.qm_region_index
link_atom_index_in_qm = link_atom.get_link_atom_index_in_qm()
if (action == 'QM'):
force_of_h = self.qm_forces[i_qm_region][link_atom_index_in_qm]
elif (action == 'MM'):
force_of_h = link_atom.mm_force
else:
print('not implemented in get_link_atom_forces')
sys.exit()
g = link_atom.equilibrium_distance_xh/\
link_atom.equilibrium_distance_xy
self.forces[i_mm_atom, 0] = self.forces[i_mm_atom, 0] +\
force_of_h[0] * g
self.forces[i_mm_atom, 1] = self.forces[i_mm_atom, 1] +\
force_of_h[1] * g
self.forces[i_mm_atom, 2] = self.forces[i_mm_atom, 2] +\
force_of_h[2] * g
self.forces[i_qm_atom, 0] = self.forces[i_qm_atom, 0] +\
force_of_h[0] * (1.0 - g)
self.forces[i_qm_atom, 1] = self.forces[i_qm_atom, 1] +\
force_of_h[1] * (1.0 - g)
self.forces[i_qm_atom, 2] = self.forces[i_qm_atom, 2] +\
force_of_h[2] * (1.0 - g)
def add_energy_exclusion_group(self, indexfilename='index.ndx'):
"""
Add energy exclusions for MM calculations.
This is the way to block non-bonded MM (coulomb&vdW)
interactions within a single QM region.
"""
infile = open(indexfilename,'r')
lines = infile.readlines()
infile.close()
qm_region_names = []
for line in lines:
if (('QM' in line) or ('Qm' in line) or ('qm' in line)):
qm_region_names.append(line.split()[1])
infile = open(self.mm_calculator.base_filename+'.mdp','r')
lines = infile.readlines()
infile.close()
outfile = open(self.mm_calculator.base_filename+'.mdp','w')
for line1 in lines:
outfile.write(line1)
outfile.write(';qm regions should not MM-interact with themselves \n')
outfile.write(';but separate qm regions MM-interact with each other \n')
outfile.write('energygrps = ')
for name in qm_region_names:
outfile.write(name + ' ')
outfile.write('\n')
outfile.write('energygrp_excl = ')
for name in qm_region_names:
outfile.write(name + ' ' + name + ' ')
outfile.write('\n')
outfile.close()
return
def add_exclusions(self):
"""
Add energy exclusions for MM calculations.
This is the way to block non-bonded MM (coulomb&vdW)
interactions within a single QM region.
"""
infile = open(self.mm_calculator.topology_filename,'r')
lines = infile.readlines()
infile.close()
outfile = open(self.mm_calculator.topology_filename,'w')
for line in lines:
if '[ angle' in line:
outfile.write('\n')
outfile.write('[ exclusions ] \n')
outfile.write(\
'; qm regions should not MM-interact with themselves \n')
outfile.write(\
'; but separate qm regions MM-interact with each other \n')
for qm_region in self.qms:
for qm_atom1 in qm_region:
outfile.write(str(qm_atom1 + 1) + ' ')
for qm_atom2 in qm_region:
if qm_atom1 != qm_atom2:
outfile.write(str(qm_atom2 + 1) + ' ')
outfile.write('\n')
outfile.write('\n')
outfile.write(line)
outfile.close()
return
def get_qm_charges(self, i_current_qm, calculator='Aims',
number_of_link_atoms = 0):
"""
Get partial charges on QM atoms.
The charges at link atoms are not returned.
"""
if calculator == 'Aims':
infile = open('aims'+str(i_current_qm)+'.out','r')
lines = infile.readlines()
infile.close()
qm_charges = []
for line in lines:
if ('Hirshfeld charge ' in line):
qm_charges.append(float(line.split()[4]))
sum_qm_charges = sum(qm_charges)
#delete charges of link atoms
if (number_of_link_atoms > 0):
del qm_charges[-number_of_link_atoms:]
return sum_qm_charges, qm_charges
def get_topology_lines(self, lines):
""" Get lines including charges of atoms (ok_lines)
also comments in these lines (comment_lines)
and lines before and after these lines
(lines_before and lines_after)
"""
lines_before = []
lines_change = []
lines_after = []
do_lines_before = True
do_lines_change = False
for line in lines:
if (' bonds ') in line:
do_lines_change = False
if do_lines_before:
lines_before.append(line)
elif do_lines_change:
lines_change.append(line)
else:
lines_after.append(line)
if (' atoms ') in line:
do_lines_before = False
do_lines_change = True
#kill comments and empty lines,
#get the charge in the topology file
comment_lines = []
lines_ok = []
for iline in range(len(lines_change)):
if lines_change[iline].startswith(';'):
comment_lines.append(lines_change[iline])
elif not lines_change[iline].strip():
pass
else:
try:
#new charge = float(lines_change[iline].split()[6])
#new charge_orig = charge_orig + charge
#top_charge.append(charge)
lines_ok.append(lines_change[iline])
except:
print('error in reading gromacs topology')
print('line is')
print('%s' % lines_change[iline])
sys.exit()
return lines_before, comment_lines, lines_ok, lines_after
def set_qm_charges_to_mm_topology(self):
""" Set qm charges to qm atoms of MM topology based on
a QM calculation.
1) The charges of link atoms are neglected.
2) The charge of a qm atom next to the link atom is set to be the
same value as in the original topology file. (trying to
avoid the artificial polarization due to qmAtom-linkH).
3) the total charge of the system (all QM and MM atoms) should be
the same as in the original classical system. Therefore, all the
        QM atoms will gain/lose an equal amount of charge in the MM topology
file.
"""
infile = open(self.mm_calculator.topology_filename,'r')
lines = infile.readlines()
infile.close()
(lines_before, comment_lines, lines_ok, lines_after) = \
self.get_topology_lines(lines)
#check that the atom numering is ok
for iline in range(len(lines_ok)):
atom_nr = iline + 1
if int(lines_ok[iline].split()[0]) != atom_nr:
print('2: error in reading gromacs topology')
print('line is')
print('%s' % lines_ok[iline])
sys.exit()
# get the total charge of non-link H atoms in the current qm system
# The charges of edge atoms and their H neighbors
# are taken from topology
# (they are unchanged, it is not from QM calculations)
for iqm, qm in enumerate(self.qms):
charges = self.qm_charges[iqm]
charges_ok = charges
qm_charge_no_link_edge_mm = 0.0
n_qm_charge_atoms = 0
for qm_atom, charge in zip(qm, charges):
if qm_atom not in self.constant_charge_qms:
qm_charge_no_link_edge_mm = \
qm_charge_no_link_edge_mm + charge
n_qm_charge_atoms = n_qm_charge_atoms + 1
# correct the total charge to be equal the original one
# in the topology file by
# adding/ substracting missing/extra charge on
# non-edge and non-single neighbor next neib QM atoms
change_charge = \
( self.classical_target_charge_sums[iqm] - \
qm_charge_no_link_edge_mm)/\
float(n_qm_charge_atoms)
for iqmatom, qmatom in enumerate(qm):
if qmatom not in self.constant_charge_qms:
charges_ok[iqmatom] = charges[iqmatom] + change_charge
# set qm charges to the lines of gromacs topology file
for iqmatom, qmatom in enumerate(qm):
if qmatom not in self.constant_charge_qms:
lines_ok[qmatom] = \
lines_ok[qmatom][0:45]\
+str(round((charges_ok[iqmatom]),5)).rjust(11)+\
lines_ok[qmatom][56:70]
# write out the new topology file
sum_charge = 0.0
for iline in range(len(lines_ok)):
sum_charge = sum_charge + float(lines_ok[iline][46:56])
comment = '; qtot '+str(round(sum_charge,4))+'\n'.ljust(12)
outfile = open(self.mm_calculator.topology_filename, 'w')
for line in lines_before:
outfile.write(line)
for line in comment_lines:
outfile.write(line)
sum_charge = 0.0
for line in lines_ok:
sum_charge = sum_charge + float(line[46:56])
comment = '; qtot '+str(round(sum_charge,4)).ljust(11)+'\n'
outfile.write(line[0:70]+comment)
outfile.write('\n')
for line in lines_after:
outfile.write(line)
outfile.close()
#------------------------------------------------------------------
#------Below the stuff needed for initializing the QM/MM system ---
#------Setting up link atoms, defining QM and MM regions ----------
#------------------------------------------------------------------
def get_edge_qm_and_mm_atoms(self, qms, system):
""" Get neighbors of QM atoms (MM-link-atoms) that are not in QM
(there may be many QM regions)
edge-QM atom can NOT be neighbored by H atom(s)
also get edge-QM atoms
"""
masses = system.get_masses()
mms1 = []
qms1 = []
setmms1 = set([])
setqms1 = set([])
for qm in qms:
link_mm_atoms = []
link_qm_atoms = []
for qm_atom in qm:
indices, offsets = self.neighbor_list.get_neighbors(qm_atom)
for neib_atom in indices:
if neib_atom not in qm:
link_mm_atoms.append(neib_atom)
#take unique atoms of flattened list
link_mm_atoms = list(set(link_mm_atoms))
# Kill MM atoms that are H atoms in the neighborlist
oklink_mm_atoms = []
for index in link_mm_atoms:
if masses[index] > 1.5:
oklink_mm_atoms.append(index)
else:
print('WARNING:')
                    print('qm system cannot be bonded to H atoms')
print('problem atom index is (numbering from 1): %s' \
% str(index+1))
print('if this is water H you should consider including it')
print('in QM')
#sys.exit()
#get indexes of QM edge atoms,
            # one qm atom can be an edge atom more than once
            # (then this QM atom will have more than one link atom)
for link_mm_atom in oklink_mm_atoms:
indices, offsets = \
self.neighbor_list.get_neighbors(link_mm_atom)
for neib_atom in indices:
if neib_atom in qm:
link_qm_atoms.append(neib_atom)
mms1.append(oklink_mm_atoms)
qms1.append(link_qm_atoms)
setmms1 |= set(oklink_mm_atoms)
setqms1 |= set(link_qm_atoms)
return mms1, qms1, setmms1, setqms1
def get_next_neighbors(self, atom_indexes, prohibited_set):
""" Get neighbors of all atoms in 'atom_indexes'
that are not in 'prohibited_set'.
'atom_indexes' is a list of list in which atom indexes belonging
of each QM region is a separate list, that is
[[QM1 atom_indexes], [QM2 atom_indexes], ...]
"""
list_neibs = []
set_list_neibs = set([])
for current_atoms in atom_indexes:
neibs = []
set_current_atoms = set(current_atoms)
for current_atom in current_atoms:
indices, offsets = \
self.neighbor_list.get_neighbors(current_atom)
setneib = set(indices)
neibs += list(setneib - set_current_atoms-prohibited_set)
list_neibs.append(neibs)
set_list_neibs |= set(neibs)
return list_neibs, set_list_neibs
def get_constant_charge_qms(self, set_qms_edge, set_second_qms):
""" get indices of all qm atoms whose charge in MM
calculations is taken from the original MM-topology
(not from the QM calculation). These atoms are edge QM atoms
and their neighbors in QM which have only one neighbor.
        For example C(edge-qm)-H(second-edge-qm) and C(edge-qm)=O(second-edge-qm).
"""
set_charge_exclusion = set_qms_edge
for second_qms in set_second_qms:
indices, offsets = self.neighbor_list.get_neighbors(second_qms)
if len(indices)== 1:
set_charge_exclusion.add(second_qms)
return set_charge_exclusion
def get_eq_distances_xy(\
self, topfilename = 'gromos.top', force_field = 'oplsaa'):
"""
The link atom is positioned as in
J. Chem. Theory Comput 2011, 7, 761-777, Eq 1
For this purpose we need the equilibrium length of each
QM-MM covalent bond. Those are obtained here from the
files of the force field.
"""
import os
print('in get_eq_distances_xy, topfilename=')
print ('%s' % topfilename)
for qm in self.qms_edge:
equilibrium_distance_xy = []
for iqm in qm:
equilibrium_distance_xy.append(0.0)
self.equilibrium_distances_xy.append(equilibrium_distance_xy)
        #get the version of the topology file where one sees the bond
        # force constants (the file is named <base_filename>.tpr.dump)
try:
os.remove(self.mm_calculator.base_filename+'.tpr.dump')
except OSError:
pass
os.system('gmxdump -s '+ self.mm_calculator.base_filename\
+'.tpr > ' + \
self.mm_calculator.base_filename+ \
'.tpr.dump 2>/dev/null')
if 'GMXDATA' in os.environ:
gromacs_home = os.environ['GMXDATA'].split(':')[0]
else:
gromacs_home = '/usr/local/gromacs/share/gromacs/'
#read the bonded force constants of this force field in order to
#get an estimate for X-Y bond constant
linesff = open(gromacs_home+ '/top/'+ force_field+ \
'.ff/ffbonded.itp', 'r').readlines()
oklinesff = []
start = False
for line in linesff:
if 'bondtypes' in line:
start = True
elif '[' in line:
break
if start and (line.strip()):
oklinesff.append(line)
#lines for getting oplsaa atom dual-types
if 'opls' in force_field:
lines_for_dual_types = open(gromacs_home+ '/top/'+ force_field+ \
'.ff/ffnonbonded.itp', 'r').readlines()
#read the types of interaction for bond stretching
lines_tpr = open(self.mm_calculator.base_filename+\
'.tpr.dump', 'r').readlines()
#read the topology file to get QM atom type
lines_top = open(topfilename, 'r').readlines()
oklines_top = []
start = False
for line in lines_top:
if start and ('[' in line):
break
if start:
                if (not line.startswith(';')) and line.strip():  # keep only non-comment, non-empty lines
oklines_top.append(line)
if '[ atoms' in line:
start = True
#get force constant and bond eq distance for all QM-MM bonds
#
ok_equilibrium_distances_xy = []
ok_qmatom_types = []
ok_mmatom_types = []
for qm0, mm0, eqsxy in zip(
self.qms_edge, self.mms_edge, \
self.equilibrium_distances_xy):
ok_eqxy = []
ok_qmatom_type = []
ok_mmatom_type = []
for qmatom, mmatom, eqxy in \
zip(qm0, mm0, eqsxy):
#find qm-mm bond in topology file (indexes from 0)
# get the index for interaction
interaction = 'empty'
for line in lines_tpr:
if (' type' in line) and ('BONDS' in line):
if (qmatom == int(line.split()[3])) and \
(mmatom == int(line.split()[4])):
interaction = line.split()[1].lstrip('type=')
break
if (qmatom == int(line.split()[4])) and \
(mmatom == int(line.split()[3])):
interaction = line.split()[1].lstrip('type=')
break
if interaction == 'empty':
print('QM-MM bond not found in topology')
print('atoms are: QM, MM: (from 1 indexing) %s' \
% str(qmatom+1) + str(mmatom+1))
sys.exit()
for line in lines_tpr:
if ('functype['+interaction+']=BONDS') in line:
r_xy0 = float(line.split()[2].rstrip(','))
#get type of the QM atom
qmatom_type = 'empty'
for line in oklines_top:
if (int(line.split()[0] ) == qmatom+ 1):
qmatom_type = line.split()[1]
#oplsaa atom type has a double name,
#the other one is used in file ffbonded.itp
break
if (qmatom_type == 'empty'):
print('problem in QM atom type')
sys.exit()
if 'opls' in force_field:
found = False
for line in lines_for_dual_types:
if (qmatom_type == line.split()[0]):
qmatom_type = line.split()[1]
found = True
break
if not found:
print('problem in QM atom type')
print('with OPLSAA force field dual atom types')
sys.exit()
#get type of the true link-MM atom
mmatom_type = 'empty'
for line in oklines_top:
if (int(line.split()[0] ) == mmatom+ 1):
mmatom_type = line.split()[1]
#oplsaa atom type has a double name,
#the other one is used in file ffbonded.itp
break
if (mmatom_type == 'empty'):
print('problem in MM atom type')
sys.exit()
if 'opls' in force_field:
found = False
for line in lines_for_dual_types:
if (mmatom_type == line.split()[0]):
mmatom_type = line.split()[1]
found = True
break
if not found:
print('problem in MM atom type')
print('with OPLSAA force field dual atom types')
sys.exit()
ok_qmatom_type.append(qmatom_type)
ok_mmatom_type.append(mmatom_type)
if (eqxy != 0.0):
#use eq constant given by the user
ok_eqxy.append(eqxy)
else:
ok_eqxy.append(r_xy0)
ok_equilibrium_distances_xy.append(ok_eqxy)
ok_qmatom_types.append(ok_qmatom_type)
ok_mmatom_types.append(ok_mmatom_type)
outfile = open('qm-mm-linkAtomsInfo.txt','w')
outfile.write(\
'=======================================================\n')
outfile.write('Information about QM-MM boundary(ies) \n')
outfile.write(\
'Created using the Atomic Simulation Environment (ASE) \n')
outfile.write(\
'=======================================================\n')
qmregion_count = 0
# ADD qm-mm-linkAtomsInfo.txt
for qm, mm, eqs_xy, eqs_xh, qmtypes, mmtypes in zip\
(self.qms_edge, self.mms_edge, ok_equilibrium_distances_xy,\
self.equilibrium_distances_xh,\
ok_qmatom_types, ok_mmatom_types):
outfile.write(\
'=======================================================\n')
qmregion_count = qmregion_count+ 1
outfile.write('Parameters related to QM region number '+\
str(qmregion_count)+'\n')
for qmatom, mmatom, eq_xy, eq_xh, qmtype, mmtype in zip\
(qm, mm, eqs_xy, eqs_xh,\
qmtypes, mmtypes):
outfile.write('qm-link-atom-index (from 1): '+str(qmatom)+'\n')
outfile.write('qm-link-atom-type: '+str(qmtype)+'\n')
outfile.write('mm-link-atom-index (from 1): '+str(mmatom)+'\n')
outfile.write('mm-link-atom-type: '+str(mmtype)+'\n')
outfile.write('qm-mm(notH)-equilibrium-distance: '\
+str(eq_xy)+' nm\n')
outfile.write('qm-H-equilibrium-distance(calculated by QM): '\
+str(eq_xh)+' nm\n')
outfile.close()
self.equilibrium_distances_xy = ok_equilibrium_distances_xy
self.qmatom_types = ok_qmatom_types
self.mmatom_types = ok_mmatom_types
return
def write_eq_distances_to_file(
self,
qm_links, filename='linkDATAout.txt'):
"""
Write classical bond equilibrium lengths
for XY (X in QM, Y in MM)
Write QM calculated XH(link atom) bond length (X in QM, H link atom)
"""
outfile = open(filename, 'w')
for iqm_region, qmlink in enumerate (qm_links):
for ilink, dummy in enumerate (qmlink):
data = self.equilibrium_distances_xy[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.equilibrium_distances_xh[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.force_constants[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.qmatom_types[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.mmatom_types[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
outfile.close()
return
def read_eq_distances_from_file(self, filename='linkDATAin.txt'):
"""
Read classical bond equilibrium lengths
for XY (X in QM, Y in MM) or XH (X in QM, H link atom)
"""
myfile = open(filename, 'r')
self.equilibrium_distances_xy = []
self.equilibrium_distances_xh = []
self.force_constants = []
self.qmatom_types = []
self.mmatom_types = []
print('Reading X-H and other data from file: %s' % filename)
for qm in self.qms_edge:
equilibrium_distance_xy = []
equilibrium_distance_xh = []
force_constant = []
qmatom_type = []
mmatom_type = []
for iqm, dum in enumerate(qm):
line = myfile.readline()
equilibrium_distance_xy.append(float(line.split()[0]))
line = myfile.readline()
equilibrium_distance_xh.append(float(line.split()[0]))
line = myfile.readline()
force_constant.append(float(line.split()[0]))
line = myfile.readline()
qmatom_type.append(line.split()[0])
line = myfile.readline()
mmatom_type.append(line.split()[0])
self.equilibrium_distances_xy.append(equilibrium_distance_xy)
self.equilibrium_distances_xh.append(equilibrium_distance_xh)
self.force_constants.append(force_constant)
self.qmatom_types.append(qmatom_type)
self.mmatom_types.append(mmatom_type)
myfile.close()
return
def get_eq_qm_atom_link_h_distances(self, system_tmp):
""" get equilibrium QMatom-linkH distances
for all linkH:s
by QM """
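        # Sketch of what follows: for every QM region the X-H link-atom
        # distances are initialised to 0.11 nm and then optimised together
        # with scipy.optimize.fmin, using the single-point QM energy of that
        # region (qm_bond_energy_function below) as the objective function.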
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from scipy.optimize import fmin
def qm_bond_energy_function(x, system_tmp, i_qm_region):
""" get the qm energy of a single qm system with a given
edge-qm-atom---link-h-atom distances of that qm region
The qm region is i_qm_region, all
edge-qm-atom---link-h-atom distance in this qm_region are
optimized simultaneously
"""
BIG_VALUE = 100000000.0
for index_x, current_x in enumerate(x):
self.equilibrium_distances_xh\
[i_qm_region][index_x] = current_x
print('current X-H bond lengths [nm]')
print('%s' % str(x))
self.link_atoms = self.get_link_atoms(\
self.qms_edge, self.mms_edge,\
self.force_constants,\
self.equilibrium_distances_xh, \
self.equilibrium_distances_xy)
self.qmsystems = \
self.define_QM_clusters_in_vacuum(system_tmp)
#try:
single_qm_energy = self.calculate_single_qm(\
self.qmsystems[i_qm_region],\
self.qm_calculators[i_qm_region])
#except RuntimeError:
# single_qm_energy = BIG_VALUE
return single_qm_energy
print('=====================================================')
print('Calculating X-H bond lengths and bond force constants')
print('by QM in one shot for each QM region.')
print('In later calculations you can: ')
print('cp linkDATAout.txt linkDATAin.txt')
print("and set link_info = 'byFILE'")
print('=====================================================')
self.equilibrium_distances_xh = []
self.force_constants = []
for qm_edges in self.qms_edge:
force_constants = []
equilibrium_distances_xh = []
for qm_edge in qm_edges:
force_constants.append(0.0)
equilibrium_distances_xh.append(0.11)
self.force_constants.append(force_constants)
self.equilibrium_distances_xh.append(equilibrium_distances_xh)
#loop over qm regions. To get optimal simultaneous
# edgeQMatom-linkH distance(s) in [nm] in that qm region
for i_qm_region in range(len(self.qms_edge)):
print('NOW running : ')
print('QM region for optimising edge-linkH distances %s'\
% str(i_qm_region))
x = self.equilibrium_distances_xh[i_qm_region][:]
xopt = fmin(qm_bond_energy_function, \
x,\
args=(system_tmp, i_qm_region),\
xtol=0.0001, ftol=0.0001)
for index_xopt, current_xopt in enumerate(xopt):
self.equilibrium_distances_xh\
[i_qm_region][index_xopt] = current_xopt
print('i_qm_region, i_link_atom, optimal X-H bond[nm] %s' \
% (str(i_qm_region) + ' ' + str(index_xopt) \
+ ' ' + str(current_xopt)))
def define_QM_clusters_in_vacuum(self, system):
""" Returns Each QM system as an Atoms object
We get a list of these Atoms objects
(in case we have many QM regions).
"""
from ase import Atoms
qmsystems = []
for qm0 in self.qms:
tmp_system = Atoms()
for qmatom in qm0:
tmp_system += system[qmatom]
qmsystems.append(tmp_system)
for link_atom in self.link_atoms:
tmp_atom = link_atom.get_link_atom()
qm_region = link_atom.get_link_atom_qm_region_index()
link_atom_index_in_qm = len(qmsystems[qm_region])
qmsystems[qm_region].append(tmp_atom)
link_atom.set_link_atom_index_in_qm(link_atom_index_in_qm)
return qmsystems
def kill_top_lines_containing_only_qm_atoms(self, \
intopfilename, \
qms, outtopfilename):
"""
Delete all lines in the topology file that contain only qm atoms
in bonded sections
(bonds, angles or dihedrals)
and in pairs section (1-4 interactions)
"""
# get an index of all qm atoms in all qm regions
qm = set()
for qm_tmp in qms:
qm = qm.union(set(qm_tmp))
infile = open(intopfilename,'r')
lines = infile.readlines()
infile.close()
outfile = sys.stdout
oklines = []
accept = True
check = ''
for line in lines:
if (('[ bonds' in line)):
oklines.append(line)
accept = False
check = 'bond'
elif (('[ angles' in line)):
oklines.append(line)
accept = False
check = 'angle'
elif (('[ dihedrals' in line)):
oklines.append(line)
accept = False
check = 'dihedral'
elif (('[ pairs' in line)):
oklines.append(line)
accept = False
check = 'pair'
elif ('[' in line):
oklines.append(line)
accept = True
check = ''
elif line in ['\n']:
oklines.append(line)
accept = True
check = ''
elif accept:
oklines.append(line)
else:
indexes = [int(float(s)-1.0) \
for s in line.split() if s.isdigit()]
indexes1 = [int(s) for s in line.split() if s.isdigit()]
                if indexes == []:  # this keeps the comment line
                    # found after a bonds, angles, or dihedrals header
oklines.append(line)
elif check == 'bond':
bondedatoms = set(indexes[0:2])
                    #set empty bond interaction for qm-qm bonds (type 5)
#(this way LJ and electrostatics is not messed up)
if (bondedatoms.issubset(qm)):
newline = str(indexes1[0]).rjust(8)+\
str(indexes1[1]).rjust(8)+\
('5').rjust(8) + '\n'
oklines.append(newline)
else:
oklines.append(line)
elif check == 'angle':
bondedatoms = set(indexes[0:3])
if (bondedatoms.issubset(qm)):
pass
else:
oklines.append(line)
elif check == 'dihedral':
bondedatoms = set(indexes[0:4])
if (bondedatoms.issubset(qm)):
pass
else:
oklines.append(line)
elif check == 'pair':
bondedatoms = set(indexes[0:2])
if (bondedatoms.issubset(qm)):
pass
else:
oklines.append(line)
outfile = open(outtopfilename,'w')
for line in oklines:
outfile.write(line)
outfile.close()
return
def get_classical_target_charge_sums(self, intopfilename, qms):
""" get sum of MM charges of the charged changed by QM
these are qm atoms that are not link-atoms or edge-qm atoms
xxx this has a problem:
Water is in .itp files, not in topology...
"""
infile = open(intopfilename,'r')
lines = infile.readlines()
infile.close()
(lines_before, comment_lines, ok_lines, lines_after) = \
self.get_topology_lines(lines)
classical_target_charge_sums = []
for iqm, qm in enumerate(qms):
classical_target_charge_sum = 0.0
for line in ok_lines:
atom_index = int(line.split()[0])-1
if (atom_index in qm) and \
(not(atom_index in self.constant_charge_qms)):
classical_target_charge_sum = \
classical_target_charge_sum + \
float(line.split()[6])
classical_target_charge_sums.\
append(classical_target_charge_sum)
return classical_target_charge_sums
| gpl-2.0 |
bhargav/scikit-learn | sklearn/cluster/tests/test_k_means.py | 41 | 27789 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
# non centered, sparse centers to check the clustering on this kind of data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the labels assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
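# Illustrative sketch (not part of the original test suite): a minimal
# KMeans round trip on the module-level blobs, checking only the public
# fit/predict API rather than exact cluster assignments.
def test_k_means_basic_usage_sketch():
    km = KMeans(n_clusters=n_clusters, random_state=42, n_init=10).fit(X)
    assert_equal(km.cluster_centers_.shape, (n_clusters, n_features))
    labels = km.predict(X)
    assert_equal(labels.shape, (n_samples,))
    assert_array_equal(np.unique(labels), np.arange(n_clusters))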
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:, :2], n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1)
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_n_init():
    # Check that increasing the number of initializations increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the labels assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
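# Illustrative sketch (not part of the original suite): the functional k_means
# interface used above, called without verbose output; only the shapes and
# sign of its return values are checked here.
def test_k_means_function_shapes_sketch():
    cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                               random_state=42)
    assert_equal(cluster_centers.shape, (n_clusters, n_features))
    assert_equal(labels.shape, (n_samples,))
    assert_greater(inertia, 0.0)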
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be', km.fit, X)
| bsd-3-clause |
cphyc/matplotlib-label-lines | labellines/test.py | 1 | 7046 | import warnings
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.dates import UTC, DateFormatter, DayLocator
from matplotlib.testing import setup
from numpy.testing import assert_raises
from .core import labelLine, labelLines
@pytest.fixture()
def setupMpl():
setup()
plt.clf()
@pytest.mark.mpl_image_compare
def test_linspace(setupMpl):
x = np.linspace(0, 1)
K = [1, 2, 4]
for k in K:
plt.plot(x, np.sin(k * x), label=rf"$f(x)=\sin({k} x)$")
labelLines(plt.gca().get_lines(), zorder=2.5)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_ylogspace(setupMpl):
x = np.linspace(0, 1)
K = [1, 2, 4]
for k in K:
plt.plot(x, np.exp(k * x), label=r"$f(x)=\exp(%s x)$" % k)
plt.yscale("log")
labelLines(plt.gca().get_lines(), zorder=2.5)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_xlogspace(setupMpl):
x = np.linspace(0, 1)
K = [1, 2, 4]
for k in K:
plt.plot(10 ** x, k * x, label=r"$f(x)=%s x$" % k)
plt.xscale("log")
labelLines(plt.gca().get_lines(), zorder=2.5)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_xylogspace(setupMpl):
x = np.geomspace(1e-1, 1e1)
K = np.arange(-5, 5, 2)
for k in K:
plt.plot(x, x ** k, label=rf"$f(x)=x^{{{k}}}$")
plt.xscale("log")
plt.yscale("log")
labelLines(plt.gca().get_lines(), zorder=2.5)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_align(setupMpl):
x = np.linspace(0, 2 * np.pi)
y = np.sin(x)
lines = plt.plot(x, y, label=r"$\sin(x)$")
labelLines(lines, align=False)
return plt.gcf()
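# Minimal usage sketch of the two public helpers exercised by these tests
# (kept out of pytest collection by the leading underscore); it relies only on
# the labelLine/labelLines call signatures already used elsewhere in this file.
def _labellines_usage_sketch():
    x = np.linspace(0, 1)
    (line1,) = plt.plot(x, x ** 2, label="$x^2$")
    (line2,) = plt.plot(x, x ** 3, label="$x^3$")
    # label a single line at a chosen x position...
    labelLine(line1, 0.5)
    # ...or label a whole group of lines in one call
    labelLines([line2], align=False)
    return plt.gcf()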
@pytest.mark.mpl_image_compare
def test_labels_range(setupMpl):
x = np.linspace(0, 1)
plt.plot(x, np.sin(x), label=r"$\sin x$")
plt.plot(x, np.cos(x), label=r"$\cos x$")
labelLines(plt.gca().get_lines(), xvals=(0, 0.5))
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_dateaxis_naive(setupMpl):
dates = [datetime(2018, 11, 1), datetime(2018, 11, 2), datetime(2018, 11, 3)]
plt.plot(dates, [0, 5, 3], label="apples")
plt.plot(dates, [3, 6, 2], label="banana")
ax = plt.gca()
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
labelLines(ax.get_lines())
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_dateaxis_advanced(setupMpl):
dates = [
datetime(2018, 11, 1, tzinfo=UTC),
datetime(2018, 11, 2, tzinfo=UTC),
datetime(2018, 11, 5, tzinfo=UTC),
datetime(2018, 11, 3, tzinfo=UTC),
]
plt.plot(dates, [0, 5, 3, 0], label="apples")
plt.plot(dates, [3, 6, 2, 1], label="banana")
ax = plt.gca()
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
labelLines(ax.get_lines())
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_polar(setupMpl):
t = np.linspace(0, 2 * np.pi, num=128)
plt.plot(np.cos(t), np.sin(t), label="$1/1$")
plt.plot(np.cos(t), np.sin(2 * t), label="$1/2$")
plt.plot(np.cos(3 * t), np.sin(t), label="$3/1$")
ax = plt.gca()
labelLines(ax.get_lines())
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_non_uniform_and_negative_spacing(setupMpl):
x = [1, -2, -3, 2, -4, -3]
plt.plot(x, [1, 2, 3, 4, 2, 1], ".-", label="apples")
plt.plot(x, [6, 5, 4, 2, 5, 5], "o-", label="banana")
ax = plt.gca()
labelLines(ax.get_lines())
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_errorbar(setupMpl):
x = np.linspace(0, 1, 20)
y = x ** 0.5
dy = x
plt.errorbar(x, y, yerr=dy, label=r"$\sqrt{x}\pm x$")
y = x ** 3
dy = x
plt.errorbar(x, y, yerr=dy, label=r"$x^3\pm x$")
ax = plt.gca()
labelLines(ax.get_lines())
return plt.gcf()
def test_nan_warning():
x = np.array([0, 1, 2, 3])
y = np.array([np.nan, np.nan, 0, 1])
line = plt.plot(x, y, label="test")[0]
with warnings.catch_warnings(record=True) as w:
labelLine(line, 0.5)
assert issubclass(w[-1].category, UserWarning)
assert "could not be annotated" in str(w[-1].message)
with warnings.catch_warnings(record=True) as w:
labelLine(line, 2.5)
assert len(w) == 0
def test_nan_failure():
x = np.array([0, 1])
y = np.array([np.nan, np.nan])
line = plt.plot(x, y, label="test")[0]
with assert_raises(Exception):
labelLine(line, 0.5)
@pytest.mark.mpl_image_compare
def test_label_range(setupMpl):
x = np.linspace(0, 1)
line = plt.plot(x, x ** 2)[0]
# This should fail
with assert_raises(Exception):
labelLine(line, -1)
with assert_raises(Exception):
labelLine(line, 2)
# This should work
labelLine(line, 0.5)
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_negative_spacing(setupMpl):
x = np.linspace(1, -1)
y = x ** 2
line = plt.plot(x, y)[0]
# Should not throw an error
labelLine(line, 0.2, label="Test")
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_label_datetime_plot(setupMpl):
plt.clf()
# data from the chinook database of iTunes music sales
x = np.array(
[
"2009-01-31T00:00:00.000000000",
"2009-02-28T00:00:00.000000000",
"2009-03-31T00:00:00.000000000",
"2009-04-30T00:00:00.000000000",
"2009-06-30T00:00:00.000000000",
"2009-09-30T00:00:00.000000000",
"2009-10-31T00:00:00.000000000",
"2009-11-30T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
y = np.array([13.86, 14.85, 28.71, 42.57, 61.38, 76.23, 77.22, 81.18])
line = plt.plot_date(x, y, "-")[0]
plt.xticks(rotation=45)
# should not throw an error
xlabel = datetime(2009, 3, 15)
labelLine(line, xlabel, "USA")
plt.tight_layout()
return plt.gcf()
def test_yoffset(setupMpl):
x = np.linspace(0, 1)
    for yoffset in ([-0.5, 0.5], 1, 1.2):  # try a list, an int and a float
plt.clf()
ax = plt.gca()
ax.plot(x, np.sin(x) * 10, label=r"$\sin x$")
ax.plot(x, np.cos(x) * 10, label=r"$\cos x$")
lines = ax.get_lines()
labelLines(
lines, xvals=(0.2, 0.7), align=False, yoffsets=yoffset, bbox={"alpha": 0}
)
@pytest.mark.mpl_image_compare
def test_outline(setupMpl):
x = np.linspace(-2, 2)
plt.ylim(-1, 5)
plt.xlim(-2, 2)
for dy, xlabel, w in zip(
np.linspace(-1, 1, 5),
np.linspace(-1.5, 1.5, 5),
np.linspace(0, 16, 5),
):
y = x ** 2 + dy
(line,) = plt.plot(x, y, label=f"width={w}")
labelLine(line, xlabel, outline_width=w, outline_color="gray")
return plt.gcf()
| mit |
ThomasMiconi/htmresearch | projects/union_pooling/experiments/union_sdr_overlap/plot_experiment.py | 12 | 4519 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import csv
import os
import sys
import matplotlib.pyplot as plt
import numpy
from htmresearch.support import data_utils
_OVERLAPS_FILE_NAME = "/overlaps.csv"
def main(inputPath, csvOutputPath, imgOutputPath):
# remove existing /overlaps.csv if present
if os.path.exists(csvOutputPath + _OVERLAPS_FILE_NAME):
os.remove(csvOutputPath + _OVERLAPS_FILE_NAME)
if not os.path.exists(csvOutputPath):
os.makedirs(csvOutputPath)
if not os.path.exists(imgOutputPath):
os.makedirs(imgOutputPath)
print "Computing Union SDR overlap between SDR traces in following dir:"
print inputPath + "\n"
filesAll = os.listdir(inputPath)
files = []
for _ in xrange(len(filesAll)):
if filesAll[_].find('unionSdrTrace') != -1:
files.append(filesAll[_])
if len(files) != 2:
print "Found {0} files at input path {1} - Requires exactly 2.".format(
len(files), inputPath)
sys.exit(1)
pathNoLearn = inputPath + "/" + files[0]
pathLearn = inputPath + "/" + files[1]
print "Comparing files..."
print pathLearn
print pathNoLearn + "\n"
# Load source A
with open(pathLearn, "rU") as fileA:
csvReader = csv.reader(fileA)
dataA = [line for line in csvReader]
unionSizeA = [len(datum) for datum in dataA]
# Load source B
with open(pathNoLearn, "rU") as fileB:
csvReader = csv.reader(fileB)
dataB = [line for line in csvReader]
unionSizeB = [len(datum) for datum in dataB]
assert len(dataA) == len(dataB)
# To display all plots on the same y scale
yRangeMax = 1.05 * max(max(unionSizeA), max(unionSizeB))
# Plot union size for data A
x = [i for i in xrange(len(dataA))]
stdDevs = None
title = "Union Size with Learning vs. Time"
data_utils.getErrorbarFigure(title, x, unionSizeA, stdDevs, "Time",
"Union Size", yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
# Plot union size for data B and save image
title = "Union Size without Learning vs. Time"
data_utils.getErrorbarFigure(title, x, unionSizeB, stdDevs, "Time",
"Union Size", yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
with open(csvOutputPath + _OVERLAPS_FILE_NAME, "wb") as outputFile:
csvWriter = csv.writer(outputFile)
overlaps = [getOverlap(dataA[i], dataB[i]) for i in xrange(len(dataA))]
csvWriter.writerow(overlaps)
outputFile.flush()
# Plot overlap and save image
title = "Learn-NoLearn Union SDR Overlap vs. Time"
data_utils.getErrorbarFigure(title, x, overlaps, stdDevs, "Time","Overlap",
yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
raw_input("Press any key to exit...")
def getOverlap(listA, listB):
arrayA = numpy.array(listA)
arrayB = numpy.array(listB)
intersection = numpy.intersect1d(arrayA, arrayB)
return len(intersection)
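# Illustrative example with hypothetical values: getOverlap([1, 2, 3], [2, 3, 4])
# returns 2, since the two union SDRs share the active cells {2, 3}.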
def _getArgs():
"""
Parses and returns command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="Path to unionSdrTrace .csv files")
parser.add_argument("--csvOutput", help="Path for csv output.")
parser.add_argument("--imgOutput", help="Path for image output.")
return parser.parse_args()
if __name__ == "__main__":
args = _getArgs()
main(args.input, args.csvOutput, args.imgOutput)
| agpl-3.0 |
aolindahl/polarization-monitor | offline_viewer_2.py | 1 | 17029 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 15:32:02 2015
@author: Anton O Lindahl
"""
import h5py
import numpy as np
import matplotlib.pyplot as plt
import lmfit
import sys
import os.path
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
'/aolPyModules')
import cookie_box
from BurningDetectors_V6 import projector
proj = projector()
plt.ion()
h5_names = ['data/amom0115_5_1.h5',
'data/amom0115_16_0.h5',
'data/amom0115_25_0.h5']
h5_names = ['data/amom0115_31_0.h5']
photo_roi = [[240, 250]]*16
photo_roi_1 = [[237, 241]]*16
photo_roi_2 = [[242.5, 250]]*16
auger_roi = [[215, 220]]*16
traces = {}
average_traces = {}
time_scales = {}
photo_roi_slices = {}
photo_bg_slices = {}
photo_roi_1_slices = {}
photo_roi_2_slices = {}
photo_bg_slices = {}
photo_bg_1_slices = {}
photo_bg_2_slices = {}
auger_roi_slices = {}
fee_mean = {}
fee_valid = {}
fee = {}
auger_sum = {}
auger_signals_average = {}
photo_signals_average_corrected = {}
bg_factors = {}
photo_signals_corrected = {}
auger_signals = {}
auger_signals_average = {}
auger_factors = {}
photo_signals_average_corrected = {}
photo_signals_1_average_corrected = {}
photo_signals_2_average_corrected = {}
bg_factors = {}
bg_1_factors = {}
bg_2_factors = {}
photo_signals = {}
photo_signals_1 = {}
photo_signals_2 = {}
photo_bg = {}
photo_bg_1 = {}
photo_bg_2 = {}
photo_signals_corrected = {}
photo_signals_1_corrected = {}
photo_signals_2_corrected = {}
bg_fit_coeffs = {}
runs = []
for h5_name in h5_names:
run = int(h5_name.split('_')[1])
runs.append(run)
traces[run] = []
average_traces[run] = []
time_scales[run] = []
photo_roi_slices[run] = []
auger_roi_slices[run] = []
photo_bg_slices[run] = []
photo_signals_corrected[run] = []
bg_factors[run] = []
photo_roi_1_slices[run] = []
photo_roi_2_slices[run] = []
auger_roi_slices[run] = []
photo_bg_slices[run] = []
photo_bg_1_slices[run] = []
photo_bg_2_slices[run] = []
photo_bg[run] = []
photo_bg_1[run] = []
photo_bg_2[run] = []
photo_signals[run] = []
photo_signals_1[run] = []
photo_signals_2[run] = []
photo_signals_corrected[run] = []
photo_signals_1_corrected[run] = []
photo_signals_2_corrected[run] = []
auger_signals[run] = []
bg_factors[run] = []
bg_1_factors[run] = []
bg_2_factors[run] = []
bg_fit_coeffs[run] = []
# with h5py.File(h5_name, 'r+') as h5_file:
h5_file = h5py.File(h5_name, 'r+')
valid = np.zeros(h5_file['fee'].len(), dtype=bool)
hits = h5_file.attrs.get('n_events_set')
valid[:hits] = 1
fee[run] = h5_file['fee'][:, 2:].mean(axis=1)
valid *= np.isfinite(fee[run]) * (fee[run] > 0.005)
fee_valid[run] = fee[run][valid]
fee_mean[run] = fee_valid[run].mean()
for i in range(16):
traces[run].append(
h5_file['time_amplitudes/det_{}'.format(i)].value[valid, :])
average_traces[run].append(np.average(traces[run][i],
axis=0) / np.mean(fee_mean[run]))
# average_traces[run].append(np.average(
# h5_file['time_amplitudes/det_{}'.format(i)].value[valid, :],
# axis=0) * 1e3)
time_scales[run].append(
h5_file['time_scales/det_{}'.format(i)].value * 1e3)
photo_roi_slices[run].append(
slice(time_scales[run][i].searchsorted(photo_roi[i][0]),
time_scales[run][i].searchsorted(photo_roi[i][1],
side='right')))
photo_roi_1_slices[run].append(
slice(time_scales[run][i].searchsorted(photo_roi_1[i][0]),
time_scales[run][i].searchsorted(photo_roi_1[i][1],
side='right')))
photo_roi_2_slices[run].append(
slice(time_scales[run][i].searchsorted(photo_roi_2[i][0]),
time_scales[run][i].searchsorted(photo_roi_2[i][1],
side='right')))
photo_bg_slices[run].append(slice(photo_roi_slices[run][i].start - 30,
photo_roi_slices[run][i].start))
photo_bg_1_slices[run].append(
slice(photo_roi_1_slices[run][i].start - 10,
photo_roi_1_slices[run][i].start))
photo_bg_2_slices[run].append(
slice(photo_roi_2_slices[run][i].start - 5,
photo_roi_2_slices[run][i].start))
bg_fit_coeffs[run].append(np.polyfit(
time_scales[run][i][photo_bg_slices[run][i]],
average_traces[run][i][photo_bg_slices[run][i]], 1))
bg_factors[run].append(
np.polyval(bg_fit_coeffs[run][i],
time_scales[run][i][photo_roi_slices[run][i]]).sum() /
np.polyval(bg_fit_coeffs[run][i],
time_scales[run][i][photo_bg_slices[run][i]]).sum())
# bg_factors[run].append((photo_roi_slices[run][i].stop -
# photo_roi_slices[run][i].start) /
# (photo_bg_slices[run][i].stop -
# photo_bg_slices[run][i].start))
bg_1_factors[run].append((photo_roi_1_slices[run][i].stop -
photo_roi_1_slices[run][i].start) /
(photo_bg_1_slices[run][i].stop -
photo_bg_1_slices[run][i].start))
bg_2_factors[run].append((photo_roi_2_slices[run][i].stop -
photo_roi_2_slices[run][i].start) /
(photo_bg_2_slices[run][i].stop -
photo_bg_2_slices[run][i].start))
auger_roi_slices[run].append(
slice(time_scales[run][i].searchsorted(auger_roi[i][0]),
time_scales[run][i].searchsorted(auger_roi[i][1],
side='right')))
photo_signals[run].append(
traces[run][i][:, photo_roi_slices[run][i]].sum(axis=1))
photo_bg[run].append(
traces[run][i][:, photo_bg_slices[run][i]].sum(axis=1) *
bg_factors[run][i])
photo_signals_corrected[run].append(
photo_signals[run][i] - photo_bg[run][i])
photo_signals_1[run].append(
traces[run][i][:, photo_roi_1_slices[run][i]].sum(axis=1))
photo_bg_1[run].append(
traces[run][i][:, photo_bg_1_slices[run][i]].sum(axis=1) *
bg_1_factors[run][i])
photo_signals_1_corrected[run].append(
photo_signals_1[run][i] - photo_bg_1[run][i])
photo_signals_2[run].append(
traces[run][i][:, photo_roi_2_slices[run][i]].sum(axis=1))
photo_bg_2[run].append(
traces[run][i][:, photo_bg_2_slices[run][i]].sum(axis=1) *
bg_2_factors[run][i])
photo_signals_2_corrected[run].append(
photo_signals_2[run][i] - photo_bg_2[run][i])
auger_signals[run].append(
traces[run][i][:, auger_roi_slices[run][i]].sum(axis=1))
photo_signals_average_corrected[run] = np.average(
photo_signals_corrected[run], axis=1)
photo_signals_1_average_corrected[run] = np.average(
photo_signals_1_corrected[run], axis=1)
photo_signals_2_average_corrected[run] = np.average(
photo_signals_2_corrected[run], axis=1)
auger_signals_average[run] = np.average(
auger_signals[run], axis=1)
auger_factors[run] = (auger_signals_average[run].max() /
auger_signals_average[run])
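    # Per-detector scaling factors that bring the average Auger signal of every
    # detector up to that of the strongest one; they are reused below to rescale
    # the photoline signals before the angular fits.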
auger_sum[run] = np.sum(auger_signals_average[run])
# %% Signal tests
#
#sig_min = -0.5
#sig_max = 4
#n_sig_bins = 2**7
#sig_ax = np.linspace(sig_min, sig_max, 2 * n_sig_bins + 1)[1::2]
#
#n_rows = int(np.floor(np.sqrt(float(len(runs)))))
#n_cols = int(np.ceil(float(len(runs))/n_rows))
#
#sig_level_fig = plt.figure('Signal levels')
#sig_level_fig.clf()
#sig_level_fig, sig_level_axis_array = plt.subplots(n_rows*2, n_cols,
# num='Signal levels')
#det = 4
#for i_run, run in enumerate(runs):
# ax = sig_level_axis_array.flatten()[i_run]
# ax.set_title('run {}'.format(run))
# ax.hist(photo_signals[run][det], n_sig_bins, (sig_min, sig_max))
# ax.hist(photo_bg[run][det], n_sig_bins, (sig_min, sig_max))
# ax.hist(photo_signals_corrected[run][det], n_sig_bins, (sig_min, sig_max))
#
# ax = sig_level_axis_array.flatten()[i_run + len(runs)]
# ax.plot(fee_valid[run], photo_signals_corrected[run][det], '.')
# ax.plot(fee_valid[run], auger_signals[run][det], '.')
#sig_level_fig.tight_layout()
# %% Trace plots
try:
trace_plot = plt.figure('Trace plot')
trace_plot.clf()
except Exception:
pass
trace_plot, trace_axis_array = plt.subplots(4, 4, sharex=True, sharey=True,
num='Trace plot')
for i_run, run in enumerate(runs):
for i, ax in enumerate(trace_axis_array.flatten()):
ax.plot(time_scales[run][i], average_traces[run][i],
'-{}'.format('byc'[i_run]),
label='{} {} deg'.format(run, 22.5*i))
ax.plot(time_scales[run][i][auger_roi_slices[run][i]],
average_traces[run][i][auger_roi_slices[run][i]], '.g')
if run != 31:
ax.plot(time_scales[run][i][photo_roi_slices[run][i]],
average_traces[run][i][photo_roi_slices[run][i]], '.r')
ax.plot(time_scales[run][i][photo_bg_slices[run][i]],
average_traces[run][i][photo_bg_slices[run][i]], '.m')
# ax.plot(time_scales[run][i],
# np.polyval(bg_fit_coeffs[run][i], time_scales[run][i]),
# 'm')
else:
ax.plot(time_scales[run][i][photo_roi_1_slices[run][i]],
average_traces[run][i][photo_roi_1_slices[run][i]], '.r')
ax.plot(time_scales[run][i][photo_bg_1_slices[run][i]],
average_traces[run][i][photo_bg_1_slices[run][i]], '.m')
ax.plot(time_scales[run][i][photo_roi_2_slices[run][i]],
average_traces[run][i][photo_roi_2_slices[run][i]], '.y')
ax.plot(time_scales[run][i][photo_bg_2_slices[run][i]],
average_traces[run][i][photo_bg_2_slices[run][i]], '.c')
if i % 4:
plt.setp(ax.get_yticklabels(), visible=False)
else:
ax.set_ylabel('signal (mV)')
        if i // 4 < 3:
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel('time (ns)')
ax.grid(True)
ax.legend(loc='best', fontsize='x-small', ncol=1)
# ax.set_title('Run {}'.format(run))
ax.set_xlim(200, 260)
plt.tight_layout()
# %%
try:
angular_plot = plt.figure('Angular')
angular_plot.clf()
except Exception:
pass
angular_plot, angular_axis_array = plt.subplots(1, 3,
subplot_kw={'polar': True},
num='Angular',
figsize=(8, 10))
phi = cookie_box.phi_rad
phi_line = np.linspace(0, 2*np.pi, 2**8)
norm_params = cookie_box.initial_params()
norm_params['A'].value = 1
norm_params['beta'].value = 2
norm_params['beta'].vary = False
norm_params['tilt'].value = 0
norm_params['tilt'].vary = False
norm_params['linear'].value = 1
norm_params['linear'].vary = False
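# Reference model parameters: unit amplitude, beta fixed at 2, no tilt and
# fully linear polarization; kept for the (currently commented-out) beta2
# normalization below.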
#lmfit.minimize(cookie_box.model_function, norm_params,
# args=(phi,
# photo_signals_average_corrected[5] * auger_factors[5]))
#beta2_factors = (cookie_box.model_function(norm_params, phi) /
# (photo_signals_average_corrected[5] * auger_factors[5]))
I_fit = np.ones(16, dtype=bool)
#I_fit[[4, 5, 11, 12]] = 0
proj.setFitMask(I_fit)
full_factors = {}
for ax, run in zip(angular_axis_array, runs):
# signal_factors = beta2_factors
# signal_factors = auger_factors[run] * beta2_factors
signal_factors = auger_factors[run]
ax.plot(phi, auger_signals_average[run], 'gx', label='auger raw')
ax.plot(phi, auger_signals_average[run] * signal_factors, 'gs',
label='auger scaled')
ax.plot(phi, photo_signals_average_corrected[run], 'rx',
label='photo raw')
ax.plot(phi, photo_signals_average_corrected[run] * signal_factors,
'ro', label='photo scaled')
params = cookie_box.initial_params()
params['beta'].vary = False
params['A'].value, params['linear'].value, params['tilt'].value = \
proj.solve(photo_signals_average_corrected[run] * signal_factors,
2)
res = lmfit.minimize(cookie_box.model_function, params,
args=(phi[I_fit],
(photo_signals_average_corrected[run] *
signal_factors)[I_fit]))
lmfit.report_fit(params)
ax.plot(phi_line, cookie_box.model_function(params, phi_line), 'm',
label='fit')
ax.set_title('Run {}'.format(run))
ax.legend(loc='upper right', fontsize='x-small')
angular_plot.tight_layout()
# ax.plot(phi, photo_signals_average_corrected[run] * factors, 'ro')
# %% Run 31 polar
if 31 in traces.keys():
run = 31
signal_factors = auger_factors[run]
fig31 = plt.figure(run)
fig31.clf()
ax = plt.subplot(131, polar=True)
ax.plot(phi, auger_signals_average[run], 'gx', label='auger raw')
ax.plot(phi, auger_signals_average[run] * signal_factors, 'gs',
label='auger scaled')
ax.legend(loc='best', fontsize='small')
ax.set_title('run31 augers')
params = cookie_box.initial_params()
params['beta'].vary = False
params['A'].value, params['linear'].value, params['tilt'].value = \
proj.solve(photo_signals_1_average_corrected[run] * signal_factors,
2)
lmfit.minimize(cookie_box.model_function, params,
args=(phi[I_fit],
(photo_signals_1_average_corrected[run] *
signal_factors)[I_fit]))
ax = plt.subplot(132, polar=True)
ax.plot(phi, photo_signals_1_average_corrected[run], 'rx',
label='photo 1 raw')
ax.plot(phi, photo_signals_1_average_corrected[run] * signal_factors,
'ro', label='photo 1 scaled')
ax.plot(phi_line, cookie_box.model_function(params, phi_line), 'm',
label='fit {:.3} lin {:.3} circ'.format(
params['linear'].value,
np.sqrt(1 - params['linear'].value**2)))
ax.legend(loc='best', fontsize='small')
ax.set_title('run31 first photoline (high energy)')
params['A'].value, params['linear'].value, params['tilt'].value = \
proj.solve(photo_signals_2_average_corrected[run] * signal_factors,
2)
lmfit.minimize(cookie_box.model_function, params,
args=(phi[I_fit],
(photo_signals_2_average_corrected[run] *
signal_factors)[I_fit]))
ax = plt.subplot(133, polar=True)
ax.plot(phi, photo_signals_2_average_corrected[run], 'yx',
label='photo 2 raw')
ax.plot(phi, photo_signals_2_average_corrected[run] * signal_factors,
'yo', label='photo 2 scaled')
ax.plot(phi_line, cookie_box.model_function(params, phi_line), 'm',
label='fit {:.3} lin {:.3} circ'.format(
params['linear'].value,
np.sqrt(1 - params['linear'].value**2)))
ax.legend(loc='best', fontsize='small')
ax.set_title('run31 second photoline (low energy)')
fig31.tight_layout()
# %%
#try:
# pol_prog_fig = plt.figure('Polar progression')
# pol_prog_fig.clf()
#except:
# pass
#
#n_rows = int(np.floor(np.sqrt(float(len(runs)))))
#n_cols = int(np.ceil(float(len(runs))/n_rows))
#
#pol_prog_fig, pol_prog_axis_array = plt.subplots(n_rows, n_cols,
# subplot_kw={'polar': True},
# num='Polar progression')
#
#for ax, run in zip(pol_prog_axis_array.flatten(), runs):
# ax.set_title('Run {}'.format(run))
# ax.plot(phi, photo_signals_average_corrected[run], 'rx')
## ax.plot(phi, photo_signals_average_corrected[run] * beta2_factors, 'ro')
#
# params = cookie_box.initial_params()
# params['beta'].vary = False
## params['A'].value, params['linear'].value, params['tilt'].value = \
## proj.solve(photo_signals_average_corrected[run] * beta2_factors, 2)
## res = lmfit.minimize(cookie_box.model_function, params,
## args=(phi[I_fit],
## (photo_signals_average_corrected[run] *
## beta2_factors)[I_fit]))
#
# ax.plot(phi_line, cookie_box.model_function(params, phi_line), 'm')
#
# print 'Run {}'.format(run)
# lmfit.report_fit(params)
#
#plt.tight_layout()
| gpl-2.0 |
jaidevd/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    # Check that we don't get NaNs when sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
jaredleekatzman/DeepSurv | experiments/scripts/deepsurv_run.py | 1 | 6839 | import argparse
from collections import defaultdict
import pandas as pd
import numpy as np
import h5py
import uuid
import copy
import json
import sys, os
sys.path.append("/DeepSurv/deepsurv")
import deep_surv
# Force matplotlib to not use any Xwindows backend.
import matplotlib
matplotlib.use('Agg')
import viz
import utils
from deepsurv_logger import TensorboardLogger
import time
localtime = time.localtime()
TIMESTRING = time.strftime("%m%d%Y%M", localtime)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('experiment', help="name of the experiment that is being run")
parser.add_argument('model', help='Model .json file to load')
parser.add_argument('dataset', help='.h5 File containing the train/valid/test datasets')
parser.add_argument('--update_fn',help='Which lasagne optimizer to use (ie. sgd, adam, rmsprop)', default='sgd')
parser.add_argument('--plot_error', action="store_true", help="If arg present, plot absolute error plots")
parser.add_argument('--treatment_idx', default=None, type=int, help='(Optional) column index of treatment variable in dataset. If present, run treatment visualizations.')
parser.add_argument('--results_dir', help="Output directory to save results (model weights, visualizations)", default=None)
parser.add_argument('--weights', help='(Optional) Weights .h5 File', default=None)
parser.add_argument('--num_epochs', type=int, default=500, help="Number of epochs to train for. Default: 500")
return parser.parse_args()
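# Example invocation (the paths and experiment name are illustrative only, not
# files shipped with this repository):
#   python deepsurv_run.py gaussian model.json experiment_data.h5 \
#       --update_fn adam --num_epochs 500 --results_dir /shared/results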
def evaluate_model(model, dataset, bootstrap = False):
def mse(model):
def deepsurv_mse(x, hr, **kwargs):
hr_pred = np.squeeze(model.predict_risk(x))
return ((hr_pred - hr) ** 2).mean()
return deepsurv_mse
metrics = {}
# Calculate c_index
metrics['c_index'] = model.get_concordance_index(**dataset)
if bootstrap:
metrics['c_index_bootstrap'] = utils.bootstrap_metric(model.get_concordance_index, dataset)
    # Calculate MSE
if 'hr' in dataset:
metrics['mse'] = mse(model)(**dataset)
if bootstrap:
metrics['mse_bootstrap'] = utils.bootstrap_metric(mse(model), dataset)
return metrics
def save_risk_surface_visualizations(model, dataset, norm_vals, output_dir, plot_error, experiment,
trt_idx):
if experiment == 'linear':
clim = (-3,3)
elif experiment == 'gaussian' or experiment == 'treatment':
clim = (-1,1)
else:
clim = (0,1)
risk_fxn = lambda x: np.squeeze(model.predict_risk(x))
color_output_file = os.path.join(output_dir, "deep_viz_color_" + TIMESTRING + ".pdf")
viz.plot_experiment_scatters(risk_fxn, dataset, norm_vals = norm_vals,
output_file=color_output_file, figsize=(4,3), clim=clim,
plot_error = plot_error, trt_idx = trt_idx)
bw_output_file = os.path.join(output_dir, "deep_viz_bw_" + TIMESTRING + ".pdf")
viz.plot_experiment_scatters(risk_fxn, dataset, norm_vals = norm_vals,
output_file=bw_output_file, figsize=(4,3), clim=clim, cmap='gray',
plot_error = plot_error, trt_idx = trt_idx)
def save_treatment_rec_visualizations(model, dataset, output_dir,
trt_i = 1, trt_j = 0, trt_idx = 0):
trt_values = np.unique(dataset['x'][:,trt_idx])
print("Recommending treatments:", trt_values)
rec_trt = model.recommend_treatment(dataset['x'], trt_i, trt_j, trt_idx)
rec_trt = np.squeeze((rec_trt < 0).astype(np.int32))
rec_dict = utils.calculate_recs_and_antirecs(rec_trt, true_trt = trt_idx, dataset = dataset)
output_file = os.path.join(output_dir, '_'.join(['deepsurv',TIMESTRING, 'rec_surv.pdf']))
print(output_file)
viz.plot_survival_curves(experiment_name = 'DeepSurv', output_file=output_file, **rec_dict)
def save_model(model, output_file):
model.save_weights(output_file)
if __name__ == '__main__':
args = parse_args()
print("Arguments:",args)
# Load Dataset
print("Loading datasets: " + args.dataset)
datasets = utils.load_datasets(args.dataset)
norm_vals = {
'mean' : datasets['train']['x'].mean(axis =0),
'std' : datasets['train']['x'].std(axis=0)
}
# Train Model
# TODO standardize location of logs + results => have them go into same directory with same UUID of experiment
tensor_log_dir = "/shared/data/logs/tensorboard_" + str(args.dataset) + "_" + str(uuid.uuid4())
logger = TensorboardLogger("experiments.deep_surv", tensor_log_dir, update_freq = 10)
model = deep_surv.load_model_from_json(args.model, args.weights)
if 'valid' in datasets:
valid_data = datasets['valid']
else:
valid_data = None
metrics = model.train(datasets['train'], valid_data, n_epochs = args.num_epochs, logger=logger,
update_fn = utils.get_optimizer_from_str(args.update_fn),
validation_frequency = 100)
# Evaluate Model
with open(args.model, 'r') as fp:
json_model = fp.read()
hyperparams = json.loads(json_model)
train_data = datasets['train']
if hyperparams['standardize']:
train_data = utils.standardize_dataset(train_data, norm_vals['mean'], norm_vals['std'])
metrics = evaluate_model(model, train_data)
print("Training metrics: " + str(metrics))
if 'valid' in datasets:
valid_data = datasets['valid']
if hyperparams['standardize']:
valid_data = utils.standardize_dataset(valid_data, norm_vals['mean'], norm_vals['std'])
metrics = evaluate_model(model, valid_data)
print("Valid metrics: " + str(metrics))
if 'test' in datasets:
test_dataset = utils.standardize_dataset(datasets['test'], norm_vals['mean'], norm_vals['std'])
metrics = evaluate_model(model, test_dataset, bootstrap=True)
print("Test metrics: " + str(metrics))
if 'viz' in datasets:
print("Saving Visualizations")
save_risk_surface_visualizations(model, datasets['viz'], norm_vals = norm_vals,
output_dir=args.results_dir, plot_error = args.plot_error,
experiment = args.experiment, trt_idx= args.treatment_idx)
if 'test' in datasets and args.treatment_idx is not None:
print("Calculating treatment recommendation survival curvs")
# We use the test dataset because these experiments don't have a viz dataset
save_treatment_rec_visualizations(model, test_dataset, output_dir=args.results_dir,
trt_idx = args.treatment_idx)
if args.results_dir:
_, model_str = os.path.split(args.model)
output_file = os.path.join(args.results_dir,"models") + model_str + str(uuid.uuid4()) + ".h5"
print("Saving model parameters to output file", output_file)
save_model(model, output_file)
exit(0)
| mit |
plotly/python-api | packages/python/plotly/plotly/figure_factory/_scatterplot.py | 2 | 44834 | from __future__ import absolute_import
import six
from plotly import exceptions, optional_imports
import plotly.colors as clrs
from plotly.figure_factory import utils
from plotly.graph_objs import graph_objs
from plotly.subplots import make_subplots
pd = optional_imports.get_module("pandas")
DIAG_CHOICES = ["scatter", "histogram", "box"]
VALID_COLORMAP_TYPES = ["cat", "seq"]
def endpts_to_intervals(endpts):
"""
Returns a list of intervals for categorical colormaps
Accepts a list or tuple of sequentially increasing numbers and returns
a list representation of the mathematical intervals with these numbers
as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]
:raises: (PlotlyError) If input is not a list or tuple
:raises: (PlotlyError) If the input contains a string
:raises: (PlotlyError) If any number does not increase after the
previous one in the sequence
"""
length = len(endpts)
# Check if endpts is a list or tuple
if not (isinstance(endpts, (tuple)) or isinstance(endpts, (list))):
raise exceptions.PlotlyError(
"The intervals_endpts argument must "
"be a list or tuple of a sequence "
"of increasing numbers."
)
# Check if endpts contains only numbers
for item in endpts:
if isinstance(item, str):
raise exceptions.PlotlyError(
"The intervals_endpts argument "
"must be a list or tuple of a "
"sequence of increasing "
"numbers."
)
# Check if numbers in endpts are increasing
for k in range(length - 1):
if endpts[k] >= endpts[k + 1]:
raise exceptions.PlotlyError(
"The intervals_endpts argument "
"must be a list or tuple of a "
"sequence of increasing "
"numbers."
)
else:
intervals = []
# add -inf to intervals
intervals.append([float("-inf"), endpts[0]])
for k in range(length - 1):
interval = []
interval.append(endpts[k])
interval.append(endpts[k + 1])
intervals.append(interval)
# add +inf to intervals
intervals.append([endpts[length - 1], float("inf")])
return intervals
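# Quick illustration of the helper above (not part of the public API); the
# values mirror the docstring example:
#   endpts_to_intervals([1, 6]) == [[float("-inf"), 1], [1, 6], [6, float("inf")]]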
def hide_tick_labels_from_box_subplots(fig):
"""
Hides tick labels for box plots in scatterplotmatrix subplots.
"""
boxplot_xaxes = []
for trace in fig["data"]:
if trace["type"] == "box":
# stores the xaxes which correspond to boxplot subplots
# since we use xaxis1, xaxis2, etc, in plotly.py
boxplot_xaxes.append("xaxis{}".format(trace["xaxis"][1:]))
for xaxis in boxplot_xaxes:
fig["layout"][xaxis]["showticklabels"] = False
def validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):
"""
Validates basic inputs for FigureFactory.create_scatterplotmatrix()
:raises: (PlotlyError) If pandas is not imported
:raises: (PlotlyError) If pandas dataframe is not inputted
:raises: (PlotlyError) If pandas dataframe has <= 1 columns
:raises: (PlotlyError) If diagonal plot choice (diag) is not one of
the viable options
:raises: (PlotlyError) If colormap_type is not a valid choice
:raises: (PlotlyError) If kwargs contains 'size', 'color' or
'colorscale'
"""
if not pd:
raise ImportError(
"FigureFactory.scatterplotmatrix requires " "a pandas DataFrame."
)
# Check if pandas dataframe
if not isinstance(df, pd.core.frame.DataFrame):
raise exceptions.PlotlyError(
"Dataframe not inputed. Please "
"use a pandas dataframe to pro"
"duce a scatterplot matrix."
)
# Check if dataframe is 1 column or less
if len(df.columns) <= 1:
raise exceptions.PlotlyError(
"Dataframe has only one column. To "
"use the scatterplot matrix, use at "
"least 2 columns."
)
# Check that diag parameter is a valid selection
if diag not in DIAG_CHOICES:
raise exceptions.PlotlyError(
"Make sure diag is set to " "one of {}".format(DIAG_CHOICES)
)
# Check that colormap_types is a valid selection
if colormap_type not in VALID_COLORMAP_TYPES:
raise exceptions.PlotlyError(
"Must choose a valid colormap type. "
"Either 'cat' or 'seq' for a cate"
"gorical and sequential colormap "
"respectively."
)
# Check for not 'size' or 'color' in 'marker' of **kwargs
if "marker" in kwargs:
FORBIDDEN_PARAMS = ["size", "color", "colorscale"]
if any(param in kwargs["marker"] for param in FORBIDDEN_PARAMS):
raise exceptions.PlotlyError(
"Your kwargs dictionary cannot "
"include the 'size', 'color' or "
"'colorscale' key words inside "
"the marker dict since 'size' is "
"already an argument of the "
"scatterplot matrix function and "
"both 'color' and 'colorscale "
"are set internally."
)
def scatterplot(dataframe, headers, diag, size, height, width, title, **kwargs):
"""
Refer to FigureFactory.create_scatterplotmatrix() for docstring
Returns fig for scatterplotmatrix without index
"""
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim, print_grid=False)
trace_list = []
# Insert traces into trace_list
for listy in dataframe:
for listx in dataframe:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(x=listx, showlegend=False)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(y=listx, name=None, showlegend=False)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
trace = graph_objs.Scatter(
x=listx, y=listy, mode="markers", showlegend=False, **kwargs
)
trace_list.append(trace)
else:
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode="markers",
marker=dict(size=size),
showlegend=False,
**kwargs
)
trace_list.append(trace)
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
fig.append_trace(trace_list[trace_index], y_index, x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
fig["layout"][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = "yaxis{}".format(1 + (dim * j))
fig["layout"][yaxis_key].update(title=headers[j])
fig["layout"].update(height=height, width=width, title=title, showlegend=True)
hide_tick_labels_from_box_subplots(fig)
return fig
def scatterplot_dict(
dataframe,
headers,
diag,
size,
height,
width,
title,
index,
index_vals,
endpts,
colormap,
colormap_type,
**kwargs
):
"""
Refer to FigureFactory.create_scatterplotmatrix() for docstring
Returns fig for scatterplotmatrix with both index and colormap picked.
Used if colormap is a dictionary with index values as keys pointing to
    colors. Forces colormap_type to behave categorically: since a color is
    assigned to each index value, a categorical treatment of the colormap is
    implied.
"""
theme = colormap
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim, print_grid=False)
trace_list = []
legend_param = 0
# Work over all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
# create a dictionary for index_vals
unique_index_vals = {}
for name in index_vals:
if name not in unique_index_vals:
unique_index_vals[name] = []
# Fill all the rest of the names into the dictionary
for name in sorted(unique_index_vals.keys()):
new_listx = []
new_listy = []
for j in range(len(index_vals)):
if index_vals[j] == name:
new_listx.append(listx[j])
new_listy.append(listy[j])
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=new_listx, marker=dict(color=theme[name]), showlegend=True
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(color=theme[name]),
showlegend=True,
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
kwargs["marker"]["color"] = theme[name]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
showlegend=True,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
marker=dict(size=size, color=theme[name]),
showlegend=True,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(color=theme[name]),
showlegend=False,
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(color=theme[name]),
showlegend=False,
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
kwargs["marker"]["color"] = theme[name]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
marker=dict(size=size, color=theme[name]),
showlegend=False,
**kwargs
)
# Push the trace into dictionary
unique_index_vals[name] = trace
trace_list.append(unique_index_vals)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
for name in sorted(trace_list[trace_index].keys()):
fig.append_trace(trace_list[trace_index][name], y_index, x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
fig["layout"][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = "yaxis{}".format(1 + (dim * j))
fig["layout"][yaxis_key].update(title=headers[j])
hide_tick_labels_from_box_subplots(fig)
if diag == "histogram":
fig["layout"].update(
height=height, width=width, title=title, showlegend=True, barmode="stack"
)
return fig
else:
fig["layout"].update(height=height, width=width, title=title, showlegend=True)
return fig
def scatterplot_theme(
dataframe,
headers,
diag,
size,
height,
width,
title,
index,
index_vals,
endpts,
colormap,
colormap_type,
**kwargs
):
"""
Refer to FigureFactory.create_scatterplotmatrix() for docstring
Returns fig for scatterplotmatrix with both index and colormap picked
"""
# Check if index is made of string values
if isinstance(index_vals[0], str):
unique_index_vals = []
for name in index_vals:
if name not in unique_index_vals:
unique_index_vals.append(name)
n_colors_len = len(unique_index_vals)
# Convert colormap to list of n RGB tuples
if colormap_type == "seq":
foo = clrs.color_parser(colormap, clrs.unlabel_rgb)
foo = clrs.n_colors(foo[0], foo[1], n_colors_len)
theme = clrs.color_parser(foo, clrs.label_rgb)
if colormap_type == "cat":
# leave list of colors the same way
theme = colormap
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim, print_grid=False)
trace_list = []
legend_param = 0
# Work over all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
# create a dictionary for index_vals
unique_index_vals = {}
for name in index_vals:
if name not in unique_index_vals:
unique_index_vals[name] = []
c_indx = 0 # color index
# Fill all the rest of the names into the dictionary
for name in sorted(unique_index_vals.keys()):
new_listx = []
new_listy = []
for j in range(len(index_vals)):
if index_vals[j] == name:
new_listx.append(listx[j])
new_listy.append(listy[j])
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(color=theme[c_indx]),
showlegend=True,
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(color=theme[c_indx]),
showlegend=True,
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
kwargs["marker"]["color"] = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
showlegend=True,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
marker=dict(size=size, color=theme[c_indx]),
showlegend=True,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(color=theme[c_indx]),
showlegend=False,
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(color=theme[c_indx]),
showlegend=False,
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
kwargs["marker"]["color"] = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=name,
marker=dict(size=size, color=theme[c_indx]),
showlegend=False,
**kwargs
)
# Push the trace into dictionary
unique_index_vals[name] = trace
if c_indx >= (len(theme) - 1):
c_indx = -1
c_indx += 1
trace_list.append(unique_index_vals)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
for name in sorted(trace_list[trace_index].keys()):
fig.append_trace(trace_list[trace_index][name], y_index, x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
fig["layout"][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = "yaxis{}".format(1 + (dim * j))
fig["layout"][yaxis_key].update(title=headers[j])
hide_tick_labels_from_box_subplots(fig)
if diag == "histogram":
fig["layout"].update(
height=height,
width=width,
title=title,
showlegend=True,
barmode="stack",
)
return fig
elif diag == "box":
fig["layout"].update(
height=height, width=width, title=title, showlegend=True
)
return fig
else:
fig["layout"].update(
height=height, width=width, title=title, showlegend=True
)
return fig
else:
if endpts:
intervals = utils.endpts_to_intervals(endpts)
# Convert colormap to list of n RGB tuples
if colormap_type == "seq":
foo = clrs.color_parser(colormap, clrs.unlabel_rgb)
foo = clrs.n_colors(foo[0], foo[1], len(intervals))
theme = clrs.color_parser(foo, clrs.label_rgb)
if colormap_type == "cat":
# leave list of colors the same way
theme = colormap
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim, print_grid=False)
trace_list = []
legend_param = 0
# Work over all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
interval_labels = {}
for interval in intervals:
interval_labels[str(interval)] = []
c_indx = 0 # color index
# Fill all the rest of the names into the dictionary
for interval in intervals:
new_listx = []
new_listy = []
for j in range(len(index_vals)):
if interval[0] < index_vals[j] <= interval[1]:
new_listx.append(listx[j])
new_listy.append(listy[j])
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(color=theme[c_indx]),
showlegend=True,
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(color=theme[c_indx]),
showlegend=True,
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
(kwargs["marker"]["color"]) = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=str(interval),
showlegend=True,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=str(interval),
marker=dict(size=size, color=theme[c_indx]),
showlegend=True,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(color=theme[c_indx]),
showlegend=False,
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(color=theme[c_indx]),
showlegend=False,
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
(kwargs["marker"]["color"]) = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=str(interval),
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode="markers",
name=str(interval),
marker=dict(size=size, color=theme[c_indx]),
showlegend=False,
**kwargs
)
# Push the trace into dictionary
interval_labels[str(interval)] = trace
if c_indx >= (len(theme) - 1):
c_indx = -1
c_indx += 1
trace_list.append(interval_labels)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
for interval in intervals:
fig.append_trace(
trace_list[trace_index][str(interval)], y_index, x_index
)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
fig["layout"][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = "yaxis{}".format(1 + (dim * j))
fig["layout"][yaxis_key].update(title=headers[j])
hide_tick_labels_from_box_subplots(fig)
if diag == "histogram":
fig["layout"].update(
height=height,
width=width,
title=title,
showlegend=True,
barmode="stack",
)
return fig
elif diag == "box":
fig["layout"].update(
height=height, width=width, title=title, showlegend=True
)
return fig
else:
fig["layout"].update(
height=height, width=width, title=title, showlegend=True
)
return fig
else:
theme = colormap
# add a copy of rgb color to theme if it contains one color
if len(theme) <= 1:
theme.append(theme[0])
color = []
for incr in range(len(theme)):
color.append([1.0 / (len(theme) - 1) * incr, theme[incr]])
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim, print_grid=False)
trace_list = []
legend_param = 0
# Run through all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=listx, marker=dict(color=theme[0]), showlegend=False
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=listx, marker=dict(color=theme[0]), showlegend=False
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
kwargs["marker"]["color"] = index_vals
kwargs["marker"]["colorscale"] = color
kwargs["marker"]["showscale"] = True
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode="markers",
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode="markers",
marker=dict(
size=size,
color=index_vals,
colorscale=color,
showscale=True,
),
showlegend=False,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == "histogram"):
trace = graph_objs.Histogram(
x=listx, marker=dict(color=theme[0]), showlegend=False
)
elif (listx == listy) and (diag == "box"):
trace = graph_objs.Box(
y=listx, marker=dict(color=theme[0]), showlegend=False
)
else:
if "marker" in kwargs:
kwargs["marker"]["size"] = size
kwargs["marker"]["color"] = index_vals
kwargs["marker"]["colorscale"] = color
kwargs["marker"]["showscale"] = False
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode="markers",
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode="markers",
marker=dict(
size=size,
color=index_vals,
colorscale=color,
showscale=False,
),
showlegend=False,
**kwargs
)
# Push the trace into list
trace_list.append(trace)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
fig.append_trace(trace_list[trace_index], y_index, x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
fig["layout"][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = "yaxis{}".format(1 + (dim * j))
fig["layout"][yaxis_key].update(title=headers[j])
hide_tick_labels_from_box_subplots(fig)
if diag == "histogram":
fig["layout"].update(
height=height,
width=width,
title=title,
showlegend=True,
barmode="stack",
)
return fig
elif diag == "box":
fig["layout"].update(
height=height, width=width, title=title, showlegend=True
)
return fig
else:
fig["layout"].update(
height=height, width=width, title=title, showlegend=True
)
return fig
def create_scatterplotmatrix(
df,
index=None,
endpts=None,
diag="scatter",
height=500,
width=500,
size=6,
title="Scatterplot Matrix",
colormap=None,
colormap_type="cat",
dataframe=None,
headers=None,
index_vals=None,
**kwargs
):
"""
Returns data for a scatterplot matrix;
**deprecated**,
use instead the plotly.graph_objects trace
:class:`plotly.graph_objects.Splom`.
:param (array) df: array of the data with column headers
:param (str) index: name of the index column in data array
    :param (list|tuple) endpts: takes an increasing sequence of numbers
that defines intervals on the real line. They are used to group
the entries in an index of numbers into their corresponding
interval and therefore can be treated as categorical data
:param (str) diag: sets the chart type for the main diagonal plots.
The options are 'scatter', 'histogram' and 'box'.
:param (int|float) height: sets the height of the chart
:param (int|float) width: sets the width of the chart
:param (float) size: sets the marker size (in px)
:param (str) title: the title label of the scatterplot matrix
:param (str|tuple|list|dict) colormap: either a plotly scale name,
an rgb or hex color, a color tuple, a list of colors or a
dictionary. An rgb color is of the form 'rgb(x, y, z)' where
x, y and z belong to the interval [0, 255] and a color tuple is a
tuple of the form (a, b, c) where a, b and c belong to [0, 1].
If colormap is a list, it must contain valid color types as its
members.
If colormap is a dictionary, all the string entries in
the index column must be a key in colormap. In this case, the
colormap_type is forced to 'cat' or categorical
:param (str) colormap_type: determines how colormap is interpreted.
Valid choices are 'seq' (sequential) and 'cat' (categorical). If
'seq' is selected, only the first two colors in colormap will be
considered (when colormap is a list) and the index values will be
linearly interpolated between those two colors. This option is
forced if all index values are numeric.
If 'cat' is selected, a color from colormap will be assigned to
each category from index, including the intervals if endpts is
being used
:param (dict) **kwargs: a dictionary of scatterplot arguments
The only forbidden parameters are 'size', 'color' and
'colorscale' in 'marker'
Example 1: Vanilla Scatterplot Matrix
>>> from plotly.graph_objs import graph_objs
>>> from plotly.figure_factory import create_scatterplotmatrix
>>> import numpy as np
>>> import pandas as pd
>>> # Create dataframe
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Column 1', 'Column 2'])
>>> # Create scatterplot matrix
>>> fig = create_scatterplotmatrix(df)
>>> fig.show()
Example 2: Indexing a Column
>>> from plotly.graph_objs import graph_objs
>>> from plotly.figure_factory import create_scatterplotmatrix
>>> import numpy as np
>>> import pandas as pd
>>> # Create dataframe with index
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['A', 'B'])
>>> # Add another column of strings to the dataframe
>>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
... 'grape', 'pear', 'pear', 'apple', 'pear'])
>>> # Create scatterplot matrix
>>> fig = create_scatterplotmatrix(df, index='Fruit', size=10)
>>> fig.show()
Example 3: Styling the Diagonal Subplots
>>> from plotly.graph_objs import graph_objs
>>> from plotly.figure_factory import create_scatterplotmatrix
>>> import numpy as np
>>> import pandas as pd
>>> # Create dataframe with index
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['A', 'B', 'C', 'D'])
>>> # Add another column of strings to the dataframe
>>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
... 'grape', 'pear', 'pear', 'apple', 'pear'])
>>> # Create scatterplot matrix
>>> fig = create_scatterplotmatrix(df, diag='box', index='Fruit', height=1000,
... width=1000)
>>> fig.show()
Example 4: Use a Theme to Style the Subplots
>>> from plotly.graph_objs import graph_objs
>>> from plotly.figure_factory import create_scatterplotmatrix
>>> import numpy as np
>>> import pandas as pd
>>> # Create dataframe with random data
>>> df = pd.DataFrame(np.random.randn(100, 3),
... columns=['A', 'B', 'C'])
>>> # Create scatterplot matrix using a built-in
>>> # Plotly palette scale and indexing column 'A'
>>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
... colormap='Blues', height=800, width=800)
>>> fig.show()
Example 5: Example 4 with Interval Factoring
>>> from plotly.graph_objs import graph_objs
>>> from plotly.figure_factory import create_scatterplotmatrix
>>> import numpy as np
>>> import pandas as pd
>>> # Create dataframe with random data
>>> df = pd.DataFrame(np.random.randn(100, 3),
... columns=['A', 'B', 'C'])
>>> # Create scatterplot matrix using a list of 2 rgb tuples
>>> # and endpoints at -1, 0 and 1
>>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
... colormap=['rgb(140, 255, 50)',
... 'rgb(170, 60, 115)', '#6c4774',
... (0.5, 0.1, 0.8)],
... endpts=[-1, 0, 1], height=800, width=800)
>>> fig.show()
Example 6: Using the colormap as a Dictionary
>>> from plotly.graph_objs import graph_objs
>>> from plotly.figure_factory import create_scatterplotmatrix
>>> import numpy as np
>>> import pandas as pd
>>> import random
>>> # Create dataframe with random data
>>> df = pd.DataFrame(np.random.randn(100, 3),
... columns=['Column A',
... 'Column B',
... 'Column C'])
>>> # Add new color column to dataframe
>>> new_column = []
>>> strange_colors = ['turquoise', 'limegreen', 'goldenrod']
>>> for j in range(100):
... new_column.append(random.choice(strange_colors))
>>> df['Colors'] = pd.Series(new_column, index=df.index)
>>> # Create scatterplot matrix using a dictionary of hex color values
>>> # which correspond to actual color names in 'Colors' column
>>> fig = create_scatterplotmatrix(
... df, diag='box', index='Colors',
... colormap= dict(
... turquoise = '#00F5FF',
... limegreen = '#32CD32',
... goldenrod = '#DAA520'
... ),
... colormap_type='cat',
... height=800, width=800
... )
>>> fig.show()
"""
# TODO: protected until #282
if dataframe is None:
dataframe = []
if headers is None:
headers = []
if index_vals is None:
index_vals = []
validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs)
# Validate colormap
if isinstance(colormap, dict):
colormap = clrs.validate_colors_dict(colormap, "rgb")
elif (
isinstance(colormap, six.string_types)
and "rgb" not in colormap
and "#" not in colormap
):
if colormap not in clrs.PLOTLY_SCALES.keys():
raise exceptions.PlotlyError(
"If 'colormap' is a string, it must be the name "
"of a Plotly Colorscale. The available colorscale "
"names are {}".format(clrs.PLOTLY_SCALES.keys())
)
else:
# TODO change below to allow the correct Plotly colorscale
colormap = clrs.colorscale_to_colors(clrs.PLOTLY_SCALES[colormap])
# keep only first and last item - fix later
colormap = [colormap[0]] + [colormap[-1]]
colormap = clrs.validate_colors(colormap, "rgb")
else:
colormap = clrs.validate_colors(colormap, "rgb")
if not index:
for name in df:
headers.append(name)
for name in headers:
dataframe.append(df[name].values.tolist())
# Check for same data-type in df columns
utils.validate_dataframe(dataframe)
figure = scatterplot(
dataframe, headers, diag, size, height, width, title, **kwargs
)
return figure
else:
# Validate index selection
if index not in df:
raise exceptions.PlotlyError(
"Make sure you set the index "
"input variable to one of the "
"column names of your "
"dataframe."
)
index_vals = df[index].values.tolist()
for name in df:
if name != index:
headers.append(name)
for name in headers:
dataframe.append(df[name].values.tolist())
# check for same data-type in each df column
utils.validate_dataframe(dataframe)
utils.validate_index(index_vals)
# check if all colormap keys are in the index
# if colormap is a dictionary
if isinstance(colormap, dict):
for key in colormap:
if not all(index in colormap for index in index_vals):
raise exceptions.PlotlyError(
"If colormap is a "
"dictionary, all the "
"names in the index "
"must be keys."
)
figure = scatterplot_dict(
dataframe,
headers,
diag,
size,
height,
width,
title,
index,
index_vals,
endpts,
colormap,
colormap_type,
**kwargs
)
return figure
else:
figure = scatterplot_theme(
dataframe,
headers,
diag,
size,
height,
width,
title,
index,
index_vals,
endpts,
colormap,
colormap_type,
**kwargs
)
return figure
| mit |
xyguo/scikit-learn | sklearn/kernel_ridge.py | 31 | 6552 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
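        # Closed-form dual solution: dual_coef_ solves
        #     (K + alpha * I) dual_coef_ = y
        # (with optional sample weighting) via a Cholesky-based kernel solver.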
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 39 | 36062 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = '\u0627'  # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = ''  # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the candidate models all converge to 100% accuracy,
    # so the grid search settles on the unigram (1, 1) representation as the
    # best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the candidate models all converge to 100% accuracy,
    # so the grid search settles on the unigram (1, 1), l2-normalized
    # representation as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
hanjiepan/LEAP | visi2ms.py | 1 | 12276 | """
visi2ms.py: write visibility to an MS file
Copyright (C) 2017 Hanjie Pan
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Correspondence concerning LEAP should be addressed as follows:
Email: hanjie [Dot] pan [At] epfl [Dot] ch
Postal address: EPFL-IC-LCAV
Station 14
1015 Lausanne
Switzerland
"""
from __future__ import division
import subprocess
import sys
import re
import os
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
import matplotlib
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if sys.version_info[0] > 2:
sys.exit('Sorry casacore only runs on Python 2.')
else:
from casacore import tables as casa_tables
def run_wsclean(ms_file, channel_range=(0, 1), mgain=0.7, FoV=5,
max_iter=1000, auto_threshold=3, threshold=None,
output_img_size=512, intermediate_size=1024,
output_name_prefix='highres', quiet=False,
run_cs=False, cs_gain=0.1):
"""
run wsclean algorithm for a given measurement set (ms file).
:param ms_file: name of the measurement set
:param channel_range: frequency channel range.
        The first one is inclusive but the second one is exclusive, i.e., (0, 1) selects channel 0 only.
:param mgain: gain for the CLEAN iteration
:param FoV: field of view (in degree). Default 5 degree
:param max_iter: maximum number of iterations of the CLEAN algorithm
:param output_img_size: output image size
:param intermediate_size: intermediate image size (must be no smaller than output_img_size)
:param output_name_prefix: prefix for various outputs from wsclean
    :param quiet: if True, run wsclean with the '-quiet' flag to suppress most console output
    :param auto_threshold: value passed to wsclean's '-auto-threshold' option (used when threshold is None)
    :param threshold: value passed to '-threshold'; takes precedence over auto_threshold when given
    :param run_cs: if True, run wsclean in its compressed-sensing ('-iuwt') mode
    :param cs_gain: CLEAN gain ('-gain') used in the compressed-sensing mode
    :return: exit code of the wsclean subprocess call
"""
if quiet:
wsclean_mode = 'wsclean -quiet'
print('Running wsclean ...')
else:
wsclean_mode = 'wsclean'
assert output_img_size <= intermediate_size
pixel_size = FoV * 3600 / output_img_size # in arcsecond
if run_cs:
wsclean_mode += ' -iuwt -gain {cs_gain} '.format(cs_gain=cs_gain)
bash_cmd = '{wsclean} ' \
'-size {intermediate_size} {intermediate_size} ' \
'-trim {output_img_size} {output_img_size} ' \
'-scale {pixel_size}asec ' \
'-name {output_name_prefix} ' \
'-datacolumn DATA ' \
'-channelrange {freq_channel_min} {freq_channel_max} ' \
'-niter {max_iter} ' \
'-mgain {mgain} ' \
'-pol I ' \
'-weight briggs 0.0 ' \
'-weighting-rank-filter 3 '.format(
wsclean=wsclean_mode,
intermediate_size=intermediate_size,
output_img_size=output_img_size,
pixel_size=repr(pixel_size),
output_name_prefix=output_name_prefix,
freq_channel_min=channel_range[0],
freq_channel_max=channel_range[1],
max_iter=max_iter,
mgain=mgain,
auto_threshold=auto_threshold
)
if threshold is None:
bash_cmd += '-auto-threshold {auto_threshold} '.format(auto_threshold=auto_threshold)
else:
bash_cmd += '-threshold {threshold} '.format(threshold=threshold)
bash_cmd += '{ms_file} '.format(ms_file=ms_file)
# run wsclean
exitcode = subprocess.call(bash_cmd, shell=True)
return exitcode
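# --- Hypothetical usage sketch (illustration only, not called anywhere) ---
# Shows how run_wsclean() above might be invoked for the first frequency
# channel of a measurement set; 'example.ms' and the output prefix are
# assumed placeholder names, not files shipped with this repository.
def _example_run_wsclean():
    exitcode = run_wsclean(ms_file='example.ms', channel_range=(0, 1),
                           mgain=0.7, FoV=5, max_iter=1000,
                           output_name_prefix='highres_example', quiet=True)
    return exitcode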
def convert_clean_outputs(clean_output_prefix, result_image_prefix,
result_data_prefix, fig_file_format='png', dpi=600):
"""
convert the FITS images from wsclean to numpy array
:param clean_output_prefix: prefix of wsCLEAN outputs
:param result_image_prefix: prefix to be used for the converted numpy array
    :param result_data_prefix: prefix to be used for the saved .npz data file
    :param fig_file_format: image format used when saving the figures (default 'png')
    :param dpi: resolution of the saved figures in dots per inch
    :return: path of the saved '-CLEAN_data.npz' file
"""
# CLEAN point sources based one '-model.fits'
with fits.open(clean_output_prefix + '-model.fits') as handle:
# FITS data
src_model = handle[0].data.squeeze()
# CLEANed image
with fits.open(clean_output_prefix + '-image.fits') as handle:
# handle.info()
# FITS header info.
img_header = handle['PRIMARY'].header
# convert to world coordinate
w = WCS(img_header)
num_pixel_RA = img_header['NAXIS1']
num_pixel_DEC = img_header['NAXIS2']
RA_mesh, DEC_mesh = np.meshgrid(np.arange(num_pixel_RA) + 1,
np.arange(num_pixel_DEC) + 1)
pixcard = np.column_stack((RA_mesh.flatten('F'), DEC_mesh.flatten('F')))
RA_DEC_plt = w.dropaxis(3).dropaxis(2).all_pix2world(pixcard, 1)
RA_plt_grid = np.reshape(RA_DEC_plt[:, 0], (-1, num_pixel_RA), order='F')
DEC_plt_grid = np.reshape(RA_DEC_plt[:, 1], (-1, num_pixel_DEC), order='F')
# FITS data
img_data = handle[0].data.squeeze()
# dirty image
with fits.open(clean_output_prefix + '-dirty.fits') as handle:
dirty_img = handle[0].data.squeeze()
plt.figure(figsize=(5, 4), dpi=300).add_subplot(111)
plt.gca().locator_params(axis='x', nbins=6)
plt.pcolormesh(RA_plt_grid, DEC_plt_grid, img_data,
shading='gouraud', cmap='Spectral_r')
plt.xlabel('RA (J2000)')
plt.ylabel('DEC (J2000)')
plt.gca().invert_xaxis()
xlabels_original = plt.gca().get_xticks().tolist()
ylabels_original = plt.gca().get_yticks().tolist()
plt.close()
# in degree, minute, and second representation
xlabels_hms_all = []
for lable_idx, xlabels_original_loop in enumerate(xlabels_original):
xlabels_original_loop = float(xlabels_original_loop)
xlabels_dms = SkyCoord(
ra=xlabels_original_loop, dec=0, unit=units.degree
).to_string('hmsdms').split(' ')[0]
xlabels_dms = list(filter(None, re.split('[hms]+', xlabels_dms)))
if lable_idx == 1:
xlabels_dms = (
u'{0}h{1}m{2}s'
).format(xlabels_dms[0], xlabels_dms[1], xlabels_dms[2])
else:
xlabels_dms = (
u'{0}m{1}s'
).format(xlabels_dms[1], xlabels_dms[2])
xlabels_hms_all.append(xlabels_dms)
ylabels_all = [(u'{0:.2f}' + u'\u00B0').format(ylabels_loop)
for ylabels_loop in ylabels_original]
# use the re-formatted ticklabels to plot the figure again
plt.figure(figsize=(5, 4), dpi=300).add_subplot(111)
plt.pcolormesh(RA_plt_grid, DEC_plt_grid, img_data,
shading='gouraud', cmap='Spectral_r')
plt.xlabel('RA (J2000)')
plt.ylabel('DEC (J2000)')
plt.gca().invert_xaxis()
plt.gca().set_xticklabels(xlabels_hms_all, fontsize=9)
plt.gca().set_yticklabels(ylabels_all, fontsize=9)
plt.axis('image')
file_name = result_image_prefix + '-image.' + fig_file_format
plt.savefig(filename=file_name, format=fig_file_format,
dpi=dpi, transparent=True)
plt.close()
plt.figure(figsize=(5, 4), dpi=300).add_subplot(111)
plt.pcolormesh(RA_plt_grid, DEC_plt_grid, dirty_img,
shading='gouraud', cmap='Spectral_r')
plt.xlabel('RA (J2000)')
plt.ylabel('DEC (J2000)')
plt.gca().invert_xaxis()
plt.gca().set_xticklabels(xlabels_hms_all, fontsize=9)
plt.gca().set_yticklabels(ylabels_all, fontsize=9)
plt.axis('image')
file_name = result_image_prefix + '-dirty.' + fig_file_format
plt.savefig(filename=file_name, format=fig_file_format,
dpi=dpi, transparent=True)
plt.close()
# save image data as well as plotting axis labels
'''
here we flip the x-axis. in radioastronomy, the convention is that RA (the x-axis)
DECREASES from left to right.
By flipping the x-axis, RA INCREASES from left to right.
'''
CLEAN_data_file = result_data_prefix + '-CLEAN_data.npz'
np.savez(
CLEAN_data_file,
x_plt_CLEAN_rad=np.radians(RA_plt_grid),
y_plt_CLEAN_rad=np.radians(DEC_plt_grid),
img_clean=img_data,
img_dirty=dirty_img,
src_model=src_model,
xlabels_hms_all=xlabels_hms_all,
ylabels_dms_all=ylabels_all
)
return CLEAN_data_file
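# --- Hypothetical usage sketch (illustration only, not called anywhere) ---
# Once convert_clean_outputs() has written the '-CLEAN_data.npz' file, the
# saved grids and images can be reloaded like this; the file name below is
# an assumed placeholder.
def _example_load_clean_data(clean_data_file='result-CLEAN_data.npz'):
    data = np.load(clean_data_file)
    # keys match those written by np.savez() in convert_clean_outputs()
    return data['img_clean'], data['img_dirty'], data['src_model']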
def update_visi_msfile(reference_ms_file, modified_ms_file,
visi, antenna1_idx, antenna2_idx, num_station):
"""
    update a reference ms file with new visibility data
:param reference_ms_file: the original ms file
:param modified_ms_file: the modified ms file with the updated visibilities
:param visi: new visibilities to be put in the copied ms file
:param antenna1_idx: coordinate of the first antenna of the visibility measurements
:param antenna2_idx: coordinate of the second antenna of the visibility measurements
    :param num_station: total number of stations (antennas) in the array
    :return:
"""
print('Copying table for modifications ...')
casa_tables.tablecopy(reference_ms_file, modified_ms_file)
print('Modifying visibilities in the new table ...')
with casa_tables.table(modified_ms_file, readonly=False, ack=False) as modified_table:
# swap axis so that:
# axis 0: cross-correlation index;
# axis 1: subband index;
# axis 2: STI index
visi = np.swapaxes(visi, 1, 2)
num_bands, num_sti = visi.shape[1:]
row_count = 0
for sti_count in range(num_sti):
visi_loop = np.zeros((num_station, num_station, num_bands), dtype=complex)
visi_loop[antenna1_idx, antenna2_idx, :] = visi[:, :, sti_count]
# so that axis 0: subband index;
# axis 1: cross-correlation index1
            # axis 2: cross-correlation index2
visi_loop = visi_loop.swapaxes(2, 0).swapaxes(2, 1)
for station2 in range(num_station):
for station1 in range(station2 + 1):
# dimension: num_subband x 4 (4 different polarisations: XX, XY, YX, YY)
visi_station1_station2 = modified_table.getcell('DATA', rownr=row_count)
visi_station1_station2[:num_bands, :] = \
visi_loop[:, station1, station2][:, np.newaxis]
# visi_station1_station2[:, :] = \
# visi_loop[:, station1, station2][:, np.newaxis]
# update visibility in the table
modified_table.putcell('DATA', rownr=row_count,
value=visi_station1_station2)
flag_station1_station2 = modified_table.getcell('FLAG', rownr=row_count)
flag_station1_station2[:num_bands, :] = False
modified_table.putcell('FLAG', rownr=row_count,
value=flag_station1_station2)
row_count += 1
assert modified_table.nrows() == row_count # sanity check
if __name__ == '__main__':
# for testing purposes
reference_ms_file = '/home/hpa/Documents/Data/BOOTES24_SB180-189.2ch8s_SIM_every50th.ms'
modified_ms_file = '/home/hpa/Documents/Data/BOOTES24_SB180-189.2ch8s_SIM_every50th_modi.ms'
num_station = 24
num_sti = 63
num_bands = 1
visi = np.random.randn(num_station * (num_station - 1), num_sti, num_bands) + \
1j * np.random.randn(num_station * (num_station - 1), num_sti, num_bands)
mask_mtx = (1 - np.eye(num_station, dtype=int)).astype(bool)
antenna2_idx, antenna1_idx = np.meshgrid(np.arange(num_station), np.arange(num_station))
antenna1_idx = np.extract(mask_mtx, antenna1_idx)
antenna2_idx = np.extract(mask_mtx, antenna2_idx)
update_visi_msfile(reference_ms_file, modified_ms_file,
visi, antenna1_idx, antenna2_idx, num_station)
| gpl-3.0 |
SummaLabs/DLS | app/backend-test/core_convertors/run02_test_kerasModel2DLS.py | 1 | 1111 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
os.environ['THEANO_FLAGS'] = "device=cpu"
import glob
import json
import networkx as nx
import matplotlib.pyplot as plt
from pprint import pprint
from app.backend.core.models.convertors import keras2dls
#########################################
pathWithDatasets='../../../data-test/test_caffe_models'
pathOutModels='../../../data/network/saved'
#########################################
if __name__ == '__main__':
lstModelsPaths = glob.glob('%s/*-kerasmodel.json' % pathWithDatasets)
pprint(lstModelsPaths)
#
for ii,pp in enumerate(lstModelsPaths):
theFinalDLSModel = keras2dls.convertKeras2DLS(pp, graphvizLayout='dot')
foutModel=os.path.abspath('%s/%s_converted.json' % (pathOutModels, os.path.splitext(os.path.basename(pp))[0]))
print ('[%d/%d] convert: %s --> [%s]' % (ii, len(lstModelsPaths), os.path.basename(pp), foutModel))
with open(foutModel, 'w') as f:
f.write(json.dumps(theFinalDLSModel, indent=4))
# nx.draw(theGraph, theGraphPos)
# plt.show()
| mit |
pyrolysis/kinetic_schemes | plot_primary.py | 2 | 10358 | """
Plot wood conversion and tar yield as mass fraction of original wood. Only
primary reactions from various kinetic pyrolysis schemes are considered.
References:
See comments in the function file for references to a particular kinetic scheme.
"""
import numpy as np
import matplotlib.pyplot as py
import functions as fn
# Parameters
# ------------------------------------------------------------------------------
T = 773 # temperature for rate constants, K
dt = 0.005 # time step, delta t
tmax = 25 # max time, s
t = np.linspace(0, tmax, num=int(tmax/dt))  # time vector (num must be an integer)
nt = len(t) # total number of time steps
# Products from Wood Kinetic Schemes
# ------------------------------------------------------------------------------
# store concentrations from primary reactions on a mass basis as kg/m^3
# row = concentration calculated from a particular kinetic scheme
# column = concentration at time step
wood = np.ones((13, nt)) # wood concentration array
tar = np.zeros((13, nt)) # tar concentration array
# products from primary reactions of Blasi 1993, Blasi 2001, Chan 1985,
# Font 1990, Janse 2000, Koufopanos 199, Liden 1988, Papadikis 2010,
# Sadhukhan 2009, Thurner 1981
for i in range(1, nt):
wood[0, i], _, tar[0, i], _ = fn.blasi(wood[0, i-1], 0, tar[0, i-1], 0, T, dt)
wood[1, i], _, tar[1, i], _ = fn.blasibranca(wood[1, i-1], 0, tar[1, i-1], 0, T, dt)
wood[2, i], _, tar[2, i], _, _, _ = fn.chan(wood[2, i-1], 0, tar[2, i-1], 0, 0, 0, T, dt)
wood[3, i], _, tar[3, i], _ = fn.font1(wood[3, i-1], 0, tar[3, i-1], 0, T, dt)
wood[4, i], _, tar[4, i], _ = fn.font2(wood[4, i-1], 0, tar[4, i-1], 0, T, dt)
wood[5, i], _, tar[5, i], _ = fn.janse(wood[5, i-1], 0, tar[5, i-1], 0, T, dt)
wood[6, i], _, _, _, _ = fn.koufopanos(wood[6, i-1], 0, 0, 0, 0, T, dt)
wood[7, i], _, tar[7, i], _ = fn.liden(wood[7, i-1], 0, tar[7, i-1], 0, T, dt)
wood[8, i], _, tar[8, i], _ = fn.papadikis(wood[8, i-1], 0, tar[8, i-1], 0, T, dt)
wood[9, i], _, _, _, _ = fn.sadhukhan(wood[9, i-1], 0, 0, 0, 0, T, dt)
wood[10, i], _, tar[10, i], _ = fn.thurner(wood[10, i-1], 0, tar[10, i-1], 0, T, dt)
# Products from Ranzi 2014 Kinetic Scheme
# ------------------------------------------------------------------------------
# weight percent (%) cellulose, hemicellulose, lignin for beech wood
wtcell = 48
wthemi = 28
wtlig = 24
# arrays for Ranzi main groups and products as mass fractions, (-)
pmcell, pcell = fn.ranzicell(1, wtcell, T, dt, nt) # cellulose
pmhemi, phemi = fn.ranzihemi(1, wthemi, T, dt, nt) # hemicellulose
pmligc, pligc = fn.ranziligc(1, wtlig, T, dt, nt) # lignin-c
pmligh, pligh = fn.ranziligh(1, wtlig, T, dt, nt) # lignin-h
pmligo, pligo = fn.ranziligo(1, wtlig, T, dt, nt) # lignin-o
# chemical species from Ranzi as mass fraction, (-)
co = pcell[0] + phemi[0] + pligc[0] + pligh[0] + pligo[0] # CO
co2 = pcell[1] + phemi[1] + pligc[1] + pligh[1] + pligo[1] # CO2
ch2o = pcell[2] + phemi[2] + pligc[2] + pligh[2] + pligo[2] # CH2O
hcooh = pcell[3] + phemi[3] + pligc[3] + pligh[3] + pligo[3] # HCOOH
ch3oh = pcell[4] + phemi[4] + pligc[4] + pligh[4] + pligo[4] # CH3OH
ch4 = pcell[5] + phemi[5] + pligc[5] + pligh[5] + pligo[5] # CH4
glyox = pcell[6] + phemi[6] + pligc[6] + pligh[6] + pligo[6] # Glyox (C2H2O2)
c2h4 = pcell[7] + phemi[7] + pligc[7] + pligh[7] + pligo[7] # C2H4
c2h4o = pcell[8] + phemi[8] + pligc[8] + pligh[8] + pligo[8] # C2H4O
haa = pcell[9] + phemi[9] + pligc[9] + pligh[9] + pligo[9] # HAA (C2H4O2)
c2h5oh = pcell[10] + phemi[10] + pligc[10] + pligh[10] + pligo[10] # C2H5OH
c3h6o = pcell[11] + phemi[11] + pligc[11] + pligh[11] + pligo[11] # C3H6O
xyl = pcell[12] + phemi[12] + pligc[12] + pligh[12] + pligo[12] # Xylose (C5H10O5)
c6h6o = pcell[13] + phemi[13] + pligc[13] + pligh[13] + pligo[13] # C6H6O
hmfu = pcell[14] + phemi[14] + pligc[14] + pligh[14] + pligo[14] # HMFU (C6H6O3)
lvg = pcell[15] + phemi[15] + pligc[15] + pligh[15] + pligo[15] # LVG (C6H10O2)
coum = pcell[16] + phemi[16] + pligc[16] + pligh[16] + pligo[16] # p-Coumaryl (C9H10O2)
fe2macr = pcell[17] + phemi[17] + pligc[17] + pligh[17] + pligo[17] # FE2MACR (C11H12O4)
h2 = pcell[18] + phemi[18] + pligc[18] + pligh[18] + pligo[18] # H2
h2o = pcell[19] + phemi[19] + pligc[19] + pligh[19] + pligo[19] # H2O
char = pcell[20] + phemi[20] + pligc[20] + pligh[20] + pligo[20] # Char
# groups from Ranzi for wood and tar as mass fraction, (-)
wood_ranzi = pmcell[0] + pmhemi[0] + pmligc[0] + pmligh[0] + pmligo[0]
tar_ranzi = ch2o + hcooh + ch3oh + glyox + c2h4o + haa + c2h5oh + c3h6o + xyl + c6h6o + hmfu + lvg + coum + fe2macr
wood[11] = wood_ranzi
tar[11] = tar_ranzi
# Products from Miller and Bellan 1997 Kinetic Scheme
# ------------------------------------------------------------------------------
# composition of beech wood from Table 2 in paper
wtcell = 0.48 # cellulose mass fraction, (-)
wthemi = 0.28 # hemicellulose mass fraction, (-)
wtlig = 0.24 # lignin mass fraction, (-)
cella = np.ones(nt)*wtcell
hemia = np.ones(nt)*wthemi
liga = np.ones(nt)*wtlig
tar1, tar2, tar3 = np.zeros(nt), np.zeros(nt), np.zeros(nt)
for i in range(1, nt):
cella[i], _, tar1[i], _ = fn.millercell_noR1(cella[i-1], 0, tar1[i-1], 0, T, dt)
hemia[i], _, tar2[i], _ = fn.millerhemi_noR1(hemia[i-1], 0, tar2[i-1], 0, T, dt)
liga[i], _, tar3[i], _ = fn.millerlig_noR1(liga[i-1], 0, tar3[i-1], 0, T, dt)
wood[12] = cella + hemia + liga
tar[12] = tar1 + tar2 + tar3
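# Hypothetical convenience sketch (illustration only, not used by the plots
# below): print the wood conversion and tar yield predicted by each scheme at
# the final time step. The ordering of `names` matches the row assignments of
# the `wood` and `tar` arrays made above.
def _print_final_yields():
    names = ['Blasi 1993', 'Blasi 2001', 'Chan 1985', 'Font1 1990',
             'Font2 1990', 'Janse 2000', 'Koufopanos 2000', 'Liden 1988',
             'Papadikis 2010', 'Sadhukhan 2009', 'Thurner 1981', 'Ranzi 2014',
             'Miller 1997']
    for k, name in enumerate(names):
        print('{:16s} wood = {:.3f}, tar = {:.3f}'.format(name, wood[k, -1],
                                                          tar[k, -1]))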
# Plot Results
# ------------------------------------------------------------------------------
py.ion()
py.close('all')
def despine():
# remove top, right axis and tick marks
ax = py.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
py.tick_params(axis='both', bottom='off', top='off', left='off', right='off')
py.figure(1)
py.plot(t, wood[0], lw=2, label='Blasi 1993')
py.plot(t, wood[1], lw=2, label='Blasi 2001')
py.plot(t, wood[2], lw=2, label='Chan 1985')
py.plot(t, wood[3], lw=2, label='Font1 1990')
py.plot(t, wood[4], lw=2, label='Font2 1990')
py.plot(t, wood[5], lw=2, label='Janse 2000')
py.plot(t, wood[6], lw=2, label='Koufopanos 2000')
py.plot(t, wood[7], '--', lw=2, label='Liden 1988')
py.plot(t, wood[8], 'o', mec='g', mew=1, markevery=200, label='Papadikis 2010')
py.plot(t, wood[9], '--', lw=2, label='Sadhukhan 2009')
py.plot(t, wood[10], 'yo', mec='y', mew=1, markevery=200, label='Thurner 1981')
py.plot(t, wood[11], '--', lw=2, label='Ranzi 2014')
py.plot(t, wood[12], '--', lw=2, label='Miller 1997')
py.title('Primary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Wood Conversion (mass fraction)')
py.legend(loc='best', numpoints=1, fontsize=11, frameon=False)
py.grid()
despine()
py.figure(2)
py.plot(t, tar[0], lw=2, label='Blasi 1993')
py.plot(t, tar[1], lw=2, label='Blasi 2001')
py.plot(t, tar[2], lw=2, label='Chan 1985')
py.plot(t, tar[3], lw=2, label='Font1 1990')
py.plot(t, tar[4], lw=2, label='Font2 1990')
py.plot(t, tar[5], lw=2, label='Janse 2000')
py.plot(t, tar[7], lw=2, label='Liden 1988')
py.plot(t, tar[8], 'o', mew=1, markevery=200, label='Papadikis 2010')
py.plot(t, tar[10], 'ro', mec='r', markevery=200, label='Thurner 1981')
py.plot(t, tar[11], '--', lw=2, label='Ranzi 2014')
py.plot(t, tar[11]+h2o, '--', lw=2, label='Ranzi 2014 + H2O')
py.plot(t, tar[12], '--', lw=2, label='Miller 1997')
py.title('Primary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Tar Yield (mass fraction)')
py.legend(loc='best', numpoints=1, fontsize=11, frameon=False)
py.grid()
despine()
# plots for black and white figures
# note Blasi 1993 and Thurner 1981 have same wood conversion and tar yields
# note Chan 1985 and Papadikis 2010 have same wood conversion and tar yields
py.figure(3)
py.plot(t, wood[0], c='k', ls='-', label='Blasi 1993, Thurner 1981')
py.plot(t, wood[1], c='k', ls='-', marker='s', markevery=200, label='Blasi 2001')
py.plot(t, wood[2], c='k', ls='--', label='Chan 1985, Papadikis 2010')
py.plot(t, wood[3], c='k', ls=':', lw=2, label='Font1 1990')
py.plot(t, wood[4], c='k', ls=':', lw=2, marker='o', markevery=200, label='Font2 1990')
py.plot(t, wood[5], c='k', ls='-', marker='v', markevery=200, label='Janse 2000')
py.plot(t, wood[6], c='k', ls='--', marker='v', markevery=200, label='Koufopanos 2000')
py.plot(t, wood[7], c='k', ls='-', marker='*', markevery=200, label='Liden 1988')
# py.plot(t, wood[8], c='k', ls='-', marker='s', markevery=200, label='Papadikis 2010')
py.plot(t, wood[9], c='k', ls='--', marker='s', markevery=200, label='Sadhukhan 2009')
# py.plot(t, wood[10], c='k', ls='', marker='^', markevery=200, label='Thurner 1981')
py.plot(t, wood[11], c='k', ls='-', marker='x', markevery=200, label='Ranzi 2014')
py.plot(t, wood[12], c='k', ls='-', marker='p', markevery=200, label='Miller 1997')
# py.title('Primary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Wood Conversion (mass fraction)')
py.legend(loc='best', ncol=2, numpoints=1, fontsize=11, frameon=False)
despine()
py.figure(4)
py.plot(t, tar[0], c='k', ls='-', label='Blasi 1993, Thurner 1981')
py.plot(t, tar[1], c='k', ls='-', marker='s', markevery=200, label='Blasi 2001')
py.plot(t, tar[2], c='k', ls='--', label='Chan 1985, Papadikis 2010')
py.plot(t, tar[3], c='k', ls=':', lw=2, label='Font1 1990')
py.plot(t, tar[4], c='k', ls=':', lw=2, marker='o', markevery=200, label='Font2 1990')
py.plot(t, tar[5], c='k', ls='-', marker='v', markevery=200, label='Janse 2000')
py.plot(t, tar[7], c='k', ls='-', marker='*', markevery=200, label='Liden 1988')
# py.plot(t, tar[8], c='k', ls='', marker='s', markevery=200, label='Papadikis 2010')
# py.plot(t, tar[10], c='k', ls='', marker='^', markevery=200, label='Thurner 1981')
py.plot(t, tar[11], c='k', ls='-', marker='x', markevery=200, label='Ranzi 2014')
py.plot(t, tar[11]+h2o, c='k', ls='-', marker='+', markevery=200, label='Ranzi 2014 + H2O')
py.plot(t, tar[12], c='k', ls='-', marker='p', markevery=200, label='Miller 1997')
# py.title('Primary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Tar Yield (mass fraction)')
py.legend(loc='best', ncol=2, numpoints=1, fontsize=11, frameon=False)
despine()
| mit |
laurentgo/arrow | python/pyarrow/tests/test_array.py | 1 | 83167 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections.abc import Iterable
import datetime
import decimal
import hypothesis as h
import hypothesis.strategies as st
import itertools
import pickle
import pytest
import struct
import sys
import weakref
import numpy as np
try:
import pickle5
except ImportError:
pickle5 = None
import pyarrow as pa
import pyarrow.tests.strategies as past
def test_total_bytes_allocated():
assert pa.total_allocated_bytes() == 0
def test_weakref():
arr = pa.array([1, 2, 3])
wr = weakref.ref(arr)
assert wr() is not None
del arr
assert wr() is None
def test_getitem_NULL():
arr = pa.array([1, None, 2])
assert arr[1].as_py() is None
assert arr[1].is_valid is False
assert isinstance(arr[1], pa.Int64Scalar)
def test_constructor_raises():
# This could happen by wrong capitalization.
# ARROW-2638: prevent calling extension class constructors directly
with pytest.raises(TypeError):
pa.Array([1, 2])
def test_list_format():
arr = pa.array([[1], None, [2, 3, None]])
result = arr.to_string()
expected = """\
[
[
1
],
null,
[
2,
3,
null
]
]"""
assert result == expected
def test_string_format():
arr = pa.array(['', None, 'foo'])
result = arr.to_string()
expected = """\
[
"",
null,
"foo"
]"""
assert result == expected
def test_long_array_format():
arr = pa.array(range(100))
result = arr.to_string(window=2)
expected = """\
[
0,
1,
...
98,
99
]"""
assert result == expected
def test_binary_format():
arr = pa.array([b'\x00', b'', None, b'\x01foo', b'\x80\xff'])
result = arr.to_string()
expected = """\
[
00,
,
null,
01666F6F,
80FF
]"""
assert result == expected
def test_binary_total_values_length():
arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'],
type='binary')
large_arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'],
type='large_binary')
assert arr.total_values_length == 22
assert arr.slice(1, 3).total_values_length == 11
assert large_arr.total_values_length == 22
assert large_arr.slice(1, 3).total_values_length == 11
def test_to_numpy_zero_copy():
arr = pa.array(range(10))
np_arr = arr.to_numpy()
# check for zero copy (both arrays using same memory)
arrow_buf = arr.buffers()[1]
assert arrow_buf.address == np_arr.ctypes.data
arr = None
import gc
gc.collect()
# Ensure base is still valid
assert np_arr.base is not None
expected = np.arange(10)
np.testing.assert_array_equal(np_arr, expected)
def test_to_numpy_unsupported_types():
# ARROW-2871: Some primitive types are not yet supported in to_numpy
bool_arr = pa.array([True, False, True])
with pytest.raises(ValueError):
bool_arr.to_numpy()
result = bool_arr.to_numpy(zero_copy_only=False)
expected = np.array([True, False, True])
np.testing.assert_array_equal(result, expected)
null_arr = pa.array([None, None, None])
with pytest.raises(ValueError):
null_arr.to_numpy()
result = null_arr.to_numpy(zero_copy_only=False)
expected = np.array([None, None, None], dtype=object)
np.testing.assert_array_equal(result, expected)
arr = pa.array([1, 2, None])
with pytest.raises(ValueError, match="with 1 nulls"):
arr.to_numpy()
def test_to_numpy_writable():
arr = pa.array(range(10))
np_arr = arr.to_numpy()
# by default not writable for zero-copy conversion
with pytest.raises(ValueError):
np_arr[0] = 10
np_arr2 = arr.to_numpy(zero_copy_only=False, writable=True)
np_arr2[0] = 10
assert arr[0].as_py() == 0
# when asking for writable, cannot do zero-copy
with pytest.raises(ValueError):
arr.to_numpy(zero_copy_only=True, writable=True)
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_to_numpy_datetime64(unit):
arr = pa.array([1, 2, 3], pa.timestamp(unit))
expected = np.array([1, 2, 3], dtype="datetime64[{}]".format(unit))
np_arr = arr.to_numpy()
np.testing.assert_array_equal(np_arr, expected)
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_to_numpy_timedelta64(unit):
arr = pa.array([1, 2, 3], pa.duration(unit))
expected = np.array([1, 2, 3], dtype="timedelta64[{}]".format(unit))
np_arr = arr.to_numpy()
np.testing.assert_array_equal(np_arr, expected)
def test_to_numpy_dictionary():
# ARROW-7591
arr = pa.array(["a", "b", "a"]).dictionary_encode()
expected = np.array(["a", "b", "a"], dtype=object)
np_arr = arr.to_numpy(zero_copy_only=False)
np.testing.assert_array_equal(np_arr, expected)
@pytest.mark.pandas
def test_to_pandas_zero_copy():
import gc
arr = pa.array(range(10))
for i in range(10):
series = arr.to_pandas()
assert sys.getrefcount(series) == 2
series = None # noqa
assert sys.getrefcount(arr) == 2
for i in range(10):
arr = pa.array(range(10))
series = arr.to_pandas()
arr = None
gc.collect()
# Ensure base is still valid
# Because of py.test's assert inspection magic, if you put getrefcount
# on the line being examined, it will be 1 higher than you expect
base_refcount = sys.getrefcount(series.values.base)
assert base_refcount == 2
series.sum()
@pytest.mark.nopandas
@pytest.mark.pandas
def test_asarray():
# ensure this is tested both when pandas is present or not (ARROW-6564)
arr = pa.array(range(4))
# The iterator interface gives back an array of Int64Value's
np_arr = np.asarray([_ for _ in arr])
assert np_arr.tolist() == [0, 1, 2, 3]
assert np_arr.dtype == np.dtype('O')
assert type(np_arr[0]) == pa.lib.Int64Value
# Calling with the arrow array gives back an array with 'int64' dtype
np_arr = np.asarray(arr)
assert np_arr.tolist() == [0, 1, 2, 3]
assert np_arr.dtype == np.dtype('int64')
# An optional type can be specified when calling np.asarray
np_arr = np.asarray(arr, dtype='str')
assert np_arr.tolist() == ['0', '1', '2', '3']
# If PyArrow array has null values, numpy type will be changed as needed
# to support nulls.
arr = pa.array([0, 1, 2, None])
assert arr.type == pa.int64()
np_arr = np.asarray(arr)
elements = np_arr.tolist()
assert elements[:3] == [0., 1., 2.]
assert np.isnan(elements[3])
assert np_arr.dtype == np.dtype('float64')
# DictionaryType data will be converted to dense numpy array
arr = pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, 0, 1]), pa.array(['a', 'b', 'c']))
np_arr = np.asarray(arr)
assert np_arr.dtype == np.dtype('object')
assert np_arr.tolist() == ['a', 'b', 'c', 'a', 'b']
@pytest.mark.parametrize('ty', [
None,
pa.null(),
pa.int8(),
pa.string()
])
def test_nulls(ty):
arr = pa.nulls(3, type=ty)
expected = pa.array([None, None, None], type=ty)
assert len(arr) == 3
assert arr.equals(expected)
if ty is None:
assert arr.type == pa.null()
else:
assert arr.type == ty
def test_array_from_scalar():
today = datetime.date.today()
now = datetime.datetime.now()
oneday = datetime.timedelta(days=1)
cases = [
(None, 1, pa.array([None])),
(None, 10, pa.nulls(10)),
(-1, 3, pa.array([-1, -1, -1], type=pa.int64())),
(2.71, 2, pa.array([2.71, 2.71], type=pa.float64())),
("string", 4, pa.array(["string"] * 4)),
(
pa.scalar(8, type=pa.uint8()),
17,
pa.array([8] * 17, type=pa.uint8())
),
(pa.scalar(None), 3, pa.array([None, None, None])),
(pa.scalar(True), 11, pa.array([True] * 11)),
(today, 2, pa.array([today] * 2)),
(now, 10, pa.array([now] * 10)),
(now.time(), 9, pa.array([now.time()] * 9)),
(oneday, 4, pa.array([oneday] * 4)),
(False, 9, pa.array([False] * 9)),
([1, 2], 2, pa.array([[1, 2], [1, 2]])),
(
pa.scalar([-1, 3], type=pa.large_list(pa.int8())),
5,
pa.array([[-1, 3]] * 5, type=pa.large_list(pa.int8()))
),
({'a': 1, 'b': 2}, 3, pa.array([{'a': 1, 'b': 2}] * 3))
]
for value, size, expected in cases:
arr = pa.repeat(value, size)
assert len(arr) == size
assert arr.equals(expected)
if expected.type == pa.null():
assert arr.null_count == size
else:
assert arr.null_count == 0
def test_array_from_dictionary_scalar():
dictionary = ['foo', 'bar', 'baz']
arr = pa.DictionaryArray.from_arrays([2, 1, 2, 0], dictionary=dictionary)
result = pa.repeat(arr[0], 5)
expected = pa.DictionaryArray.from_arrays([2] * 5, dictionary=dictionary)
assert result.equals(expected)
result = pa.repeat(arr[3], 5)
expected = pa.DictionaryArray.from_arrays([0] * 5, dictionary=dictionary)
assert result.equals(expected)
def test_array_getitem():
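    # valid positive and negative indices behave like Python list indexing;
    # out-of-range indices raise IndexError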
arr = pa.array(range(10, 15))
lst = arr.to_pylist()
for idx in range(-len(arr), len(arr)):
assert arr[idx].as_py() == lst[idx]
for idx in range(-2 * len(arr), -len(arr)):
with pytest.raises(IndexError):
arr[idx]
for idx in range(len(arr), 2 * len(arr)):
with pytest.raises(IndexError):
arr[idx]
def test_array_slice():
arr = pa.array(range(10))
sliced = arr.slice(2)
expected = pa.array(range(2, 10))
assert sliced.equals(expected)
sliced2 = arr.slice(2, 4)
expected2 = pa.array(range(2, 6))
assert sliced2.equals(expected2)
# 0 offset
assert arr.slice(0).equals(arr)
# Slice past end of array
assert len(arr.slice(len(arr))) == 0
with pytest.raises(IndexError):
arr.slice(-1)
# Test slice notation
assert arr[2:].equals(arr.slice(2))
assert arr[2:5].equals(arr.slice(2, 3))
assert arr[-5:].equals(arr.slice(len(arr) - 5))
n = len(arr)
for start in range(-n * 2, n * 2):
for stop in range(-n * 2, n * 2):
assert arr[start:stop].to_pylist() == arr.to_pylist()[start:stop]
def test_array_slice_negative_step():
# ARROW-2714
np_arr = np.arange(20)
arr = pa.array(np_arr)
chunked_arr = pa.chunked_array([arr])
cases = [
slice(None, None, -1),
slice(None, 6, -2),
slice(10, 6, -2),
slice(8, None, -2),
slice(2, 10, -2),
slice(10, 2, -2),
slice(None, None, 2),
slice(0, 10, 2),
]
for case in cases:
result = arr[case]
expected = pa.array(np_arr[case])
assert result.equals(expected)
result = pa.record_batch([arr], names=['f0'])[case]
expected = pa.record_batch([expected], names=['f0'])
assert result.equals(expected)
result = chunked_arr[case]
expected = pa.chunked_array([np_arr[case]])
assert result.equals(expected)
def test_array_diff():
# ARROW-6252
arr1 = pa.array(['foo'], type=pa.utf8())
arr2 = pa.array(['foo', 'bar', None], type=pa.utf8())
arr3 = pa.array([1, 2, 3])
arr4 = pa.array([[], [1], None], type=pa.list_(pa.int64()))
assert arr1.diff(arr1) == ''
assert arr1.diff(arr2) == '''
@@ -1, +1 @@
+"bar"
+null
'''
assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64'
assert arr1.diff(arr4).strip() == ('# Array types differed: string vs '
'list<item: int64>')
def test_array_iter():
arr = pa.array(range(10))
for i, j in zip(range(10), arr):
assert i == j.as_py()
assert isinstance(arr, Iterable)
def test_struct_array_slice():
# ARROW-2311: slicing nested arrays needs special care
ty = pa.struct([pa.field('a', pa.int8()),
pa.field('b', pa.float32())])
arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5},
{'a': 5, 'b': 6.5}]
def test_array_factory_invalid_type():
class MyObject:
pass
arr = np.array([MyObject()])
with pytest.raises(ValueError):
pa.array(arr)
def test_array_ref_to_ndarray_base():
arr = np.array([1, 2, 3])
refcount = sys.getrefcount(arr)
arr2 = pa.array(arr) # noqa
assert sys.getrefcount(arr) == (refcount + 1)
def test_array_eq():
# ARROW-2150 / ARROW-9445: we define the __eq__ behavior to be
# data equality (not element-wise equality)
arr1 = pa.array([1, 2, 3], type=pa.int32())
arr2 = pa.array([1, 2, 3], type=pa.int32())
arr3 = pa.array([1, 2, 3], type=pa.int64())
assert (arr1 == arr2) is True
assert (arr1 != arr2) is False
assert (arr1 == arr3) is False
assert (arr1 != arr3) is True
def test_array_from_buffers():
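    # reconstruct an int16 array from raw validity/data buffers, with and
    # without nulls and with a non-zero offset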
values_buf = pa.py_buffer(np.int16([4, 5, 6, 7]))
nulls_buf = pa.py_buffer(np.uint8([0b00001101]))
arr = pa.Array.from_buffers(pa.int16(), 4, [nulls_buf, values_buf])
assert arr.type == pa.int16()
assert arr.to_pylist() == [4, None, 6, 7]
arr = pa.Array.from_buffers(pa.int16(), 4, [None, values_buf])
assert arr.type == pa.int16()
assert arr.to_pylist() == [4, 5, 6, 7]
arr = pa.Array.from_buffers(pa.int16(), 3, [nulls_buf, values_buf],
offset=1)
assert arr.type == pa.int16()
assert arr.to_pylist() == [None, 6, 7]
with pytest.raises(TypeError):
pa.Array.from_buffers(pa.int16(), 3, ['', ''], offset=1)
def test_string_binary_from_buffers():
array = pa.array(["a", None, "b", "c"])
buffers = array.buffers()
copied = pa.StringArray.from_buffers(
len(array), buffers[1], buffers[2], buffers[0], array.null_count,
array.offset)
assert copied.to_pylist() == ["a", None, "b", "c"]
binary_copy = pa.Array.from_buffers(pa.binary(), len(array),
array.buffers(), array.null_count,
array.offset)
assert binary_copy.to_pylist() == [b"a", None, b"b", b"c"]
copied = pa.StringArray.from_buffers(
len(array), buffers[1], buffers[2], buffers[0])
assert copied.to_pylist() == ["a", None, "b", "c"]
sliced = array[1:]
buffers = sliced.buffers()
copied = pa.StringArray.from_buffers(
len(sliced), buffers[1], buffers[2], buffers[0], -1, sliced.offset)
assert copied.to_pylist() == [None, "b", "c"]
assert copied.null_count == 1
# Slice but exclude all null entries so that we don't need to pass
# the null bitmap.
sliced = array[2:]
buffers = sliced.buffers()
copied = pa.StringArray.from_buffers(
len(sliced), buffers[1], buffers[2], None, -1, sliced.offset)
assert copied.to_pylist() == ["b", "c"]
assert copied.null_count == 0
@pytest.mark.parametrize('list_type_factory', [pa.list_, pa.large_list])
def test_list_from_buffers(list_type_factory):
ty = list_type_factory(pa.int16())
array = pa.array([[0, 1, 2], None, [], [3, 4, 5]], type=ty)
assert array.type == ty
buffers = array.buffers()
with pytest.raises(ValueError):
# No children
pa.Array.from_buffers(ty, 4, [None, buffers[1]])
child = pa.Array.from_buffers(pa.int16(), 6, buffers[2:])
copied = pa.Array.from_buffers(ty, 4, buffers[:2], children=[child])
assert copied.equals(array)
with pytest.raises(ValueError):
# too many children
pa.Array.from_buffers(ty, 4, [None, buffers[1]],
children=[child, child])
def test_struct_from_buffers():
ty = pa.struct([pa.field('a', pa.int16()), pa.field('b', pa.utf8())])
array = pa.array([{'a': 0, 'b': 'foo'}, None, {'a': 5, 'b': ''}],
type=ty)
buffers = array.buffers()
with pytest.raises(ValueError):
# No children
pa.Array.from_buffers(ty, 3, [None, buffers[1]])
children = [pa.Array.from_buffers(pa.int16(), 3, buffers[1:3]),
pa.Array.from_buffers(pa.utf8(), 3, buffers[3:])]
copied = pa.Array.from_buffers(ty, 3, buffers[:1], children=children)
assert copied.equals(array)
with pytest.raises(ValueError):
        # not enough children
pa.Array.from_buffers(ty, 3, [buffers[0]],
children=children[:1])
def test_struct_from_arrays():
a = pa.array([4, 5, 6], type=pa.int64())
b = pa.array(["bar", None, ""])
c = pa.array([[1, 2], None, [3, None]])
expected_list = [
{'a': 4, 'b': 'bar', 'c': [1, 2]},
{'a': 5, 'b': None, 'c': None},
{'a': 6, 'b': '', 'c': [3, None]},
]
# From field names
arr = pa.StructArray.from_arrays([a, b, c], ["a", "b", "c"])
assert arr.type == pa.struct(
[("a", a.type), ("b", b.type), ("c", c.type)])
assert arr.to_pylist() == expected_list
with pytest.raises(ValueError):
pa.StructArray.from_arrays([a, b, c], ["a", "b"])
arr = pa.StructArray.from_arrays([], [])
assert arr.type == pa.struct([])
assert arr.to_pylist() == []
# From fields
fa = pa.field("a", a.type, nullable=False)
fb = pa.field("b", b.type)
fc = pa.field("c", c.type)
arr = pa.StructArray.from_arrays([a, b, c], fields=[fa, fb, fc])
assert arr.type == pa.struct([fa, fb, fc])
assert not arr.type[0].nullable
assert arr.to_pylist() == expected_list
with pytest.raises(ValueError):
pa.StructArray.from_arrays([a, b, c], fields=[fa, fb])
arr = pa.StructArray.from_arrays([], fields=[])
assert arr.type == pa.struct([])
assert arr.to_pylist() == []
# Inconsistent fields
fa2 = pa.field("a", pa.int32())
with pytest.raises(ValueError, match="int64 vs int32"):
pa.StructArray.from_arrays([a, b, c], fields=[fa2, fb, fc])
def test_dictionary_from_numpy():
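    # build dictionary-encoded arrays from numpy indices and dictionary
    # values, optionally masking entries to null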
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
mask = np.array([False, False, True, False, False, False])
d1 = pa.DictionaryArray.from_arrays(indices, dictionary)
d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)
assert d1.indices.to_pylist() == indices.tolist()
    assert d2.indices.to_pylist() == indices.tolist()
assert d1.dictionary.to_pylist() == dictionary.tolist()
assert d2.dictionary.to_pylist() == dictionary.tolist()
for i in range(len(indices)):
assert d1[i].as_py() == dictionary[indices[i]]
if mask[i]:
assert d2[i].as_py() is None
else:
assert d2[i].as_py() == dictionary[indices[i]]
def test_dictionary_from_boxed_arrays():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
iarr = pa.array(indices)
darr = pa.array(dictionary)
d1 = pa.DictionaryArray.from_arrays(iarr, darr)
assert d1.indices.to_pylist() == indices.tolist()
assert d1.dictionary.to_pylist() == dictionary.tolist()
for i in range(len(indices)):
assert d1[i].as_py() == dictionary[indices[i]]
def test_dictionary_from_arrays_boundscheck():
indices1 = pa.array([0, 1, 2, 0, 1, 2])
indices2 = pa.array([0, -1, 2])
indices3 = pa.array([0, 1, 2, 3])
dictionary = pa.array(['foo', 'bar', 'baz'])
# Works fine
pa.DictionaryArray.from_arrays(indices1, dictionary)
with pytest.raises(pa.ArrowException):
pa.DictionaryArray.from_arrays(indices2, dictionary)
with pytest.raises(pa.ArrowException):
pa.DictionaryArray.from_arrays(indices3, dictionary)
    # If we are confident that the indices are "safe", we can pass
    # safe=False to disable the bounds checking
pa.DictionaryArray.from_arrays(indices2, dictionary, safe=False)
def test_dictionary_indices():
# https://issues.apache.org/jira/browse/ARROW-6882
indices = pa.array([0, 1, 2, 0, 1, 2])
dictionary = pa.array(['foo', 'bar', 'baz'])
arr = pa.DictionaryArray.from_arrays(indices, dictionary)
arr.indices.validate(full=True)
@pytest.mark.parametrize(('list_array_type', 'list_type_factory'),
[(pa.ListArray, pa.list_),
(pa.LargeListArray, pa.large_list)])
def test_list_from_arrays(list_array_type, list_type_factory):
offsets_arr = np.array([0, 2, 5, 8], dtype='i4')
offsets = pa.array(offsets_arr, type='int32')
pyvalues = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h']
values = pa.array(pyvalues, type='binary')
result = list_array_type.from_arrays(offsets, values)
expected = pa.array([pyvalues[:2], pyvalues[2:5], pyvalues[5:8]],
type=list_type_factory(pa.binary()))
assert result.equals(expected)
# With nulls
offsets = [0, None, 2, 6]
values = [b'a', b'b', b'c', b'd', b'e', b'f']
result = list_array_type.from_arrays(offsets, values)
expected = pa.array([values[:2], None, values[2:]],
type=list_type_factory(pa.binary()))
assert result.equals(expected)
# Another edge case
offsets2 = [0, 2, None, 6]
result = list_array_type.from_arrays(offsets2, values)
expected = pa.array([values[:2], values[2:], None],
type=list_type_factory(pa.binary()))
assert result.equals(expected)
# raise on invalid array
offsets = [1, 3, 10]
values = np.arange(5)
with pytest.raises(ValueError):
list_array_type.from_arrays(offsets, values)
# Non-monotonic offsets
offsets = [0, 3, 2, 6]
values = list(range(6))
result = list_array_type.from_arrays(offsets, values)
with pytest.raises(ValueError):
result.validate(full=True)
def test_map_from_arrays():
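    # build a MapArray from offsets, keys and items; a null offset yields a
    # null map entry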
offsets_arr = np.array([0, 2, 5, 8], dtype='i4')
offsets = pa.array(offsets_arr, type='int32')
pykeys = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h']
pyitems = list(range(len(pykeys)))
pypairs = list(zip(pykeys, pyitems))
pyentries = [pypairs[:2], pypairs[2:5], pypairs[5:8]]
keys = pa.array(pykeys, type='binary')
items = pa.array(pyitems, type='i4')
result = pa.MapArray.from_arrays(offsets, keys, items)
expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32()))
assert result.equals(expected)
# With nulls
offsets = [0, None, 2, 6]
pykeys = [b'a', b'b', b'c', b'd', b'e', b'f']
pyitems = [1, 2, 3, None, 4, 5]
pypairs = list(zip(pykeys, pyitems))
pyentries = [pypairs[:2], None, pypairs[2:]]
keys = pa.array(pykeys, type='binary')
items = pa.array(pyitems, type='i4')
result = pa.MapArray.from_arrays(offsets, keys, items)
expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32()))
assert result.equals(expected)
# check invalid usage
offsets = [0, 1, 3, 5]
keys = np.arange(5)
items = np.arange(5)
_ = pa.MapArray.from_arrays(offsets, keys, items)
# raise on invalid offsets
with pytest.raises(ValueError):
pa.MapArray.from_arrays(offsets + [6], keys, items)
# raise on length of keys != items
with pytest.raises(ValueError):
pa.MapArray.from_arrays(offsets, keys, np.concatenate([items, items]))
# raise on keys with null
keys_with_null = list(keys)[:-1] + [None]
assert len(keys_with_null) == len(items)
with pytest.raises(ValueError):
pa.MapArray.from_arrays(offsets, keys_with_null, items)
def test_fixed_size_list_from_arrays():
values = pa.array(range(12), pa.int64())
result = pa.FixedSizeListArray.from_arrays(values, 4)
assert result.to_pylist() == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
assert result.type.equals(pa.list_(pa.int64(), 4))
# raise on invalid values / list_size
with pytest.raises(ValueError):
pa.FixedSizeListArray.from_arrays(values, -4)
with pytest.raises(ValueError):
# array with list size 0 cannot be constructed with from_arrays
pa.FixedSizeListArray.from_arrays(pa.array([], pa.int64()), 0)
with pytest.raises(ValueError):
        # length of values is not a multiple of 5
pa.FixedSizeListArray.from_arrays(values, 5)
def test_union_from_dense():
binary = pa.array([b'a', b'b', b'c', b'd'], type='binary')
int64 = pa.array([1, 2, 3], type='int64')
types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')
logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8')
value_offsets = pa.array([1, 0, 0, 2, 1, 2, 3], type='int32')
py_value = [b'b', 1, b'a', b'c', 2, 3, b'd']
def check_result(result, expected_field_names, expected_type_codes,
expected_type_code_values):
result.validate(full=True)
actual_field_names = [result.type[i].name
for i in range(result.type.num_fields)]
assert actual_field_names == expected_field_names
assert result.type.mode == "dense"
assert result.type.type_codes == expected_type_codes
assert result.to_pylist() == py_value
assert expected_type_code_values.equals(result.type_codes)
assert value_offsets.equals(result.offsets)
assert result.field(0).equals(binary)
assert result.field(1).equals(int64)
with pytest.raises(KeyError):
result.field(-1)
with pytest.raises(KeyError):
result.field(2)
# without field names and type codes
check_result(pa.UnionArray.from_dense(types, value_offsets,
[binary, int64]),
expected_field_names=['0', '1'],
expected_type_codes=[0, 1],
expected_type_code_values=types)
# with field names
check_result(pa.UnionArray.from_dense(types, value_offsets,
[binary, int64],
['bin', 'int']),
expected_field_names=['bin', 'int'],
expected_type_codes=[0, 1],
expected_type_code_values=types)
# with type codes
check_result(pa.UnionArray.from_dense(logical_types, value_offsets,
[binary, int64],
type_codes=[11, 13]),
expected_field_names=['0', '1'],
expected_type_codes=[11, 13],
expected_type_code_values=logical_types)
# with field names and type codes
check_result(pa.UnionArray.from_dense(logical_types, value_offsets,
[binary, int64],
['bin', 'int'], [11, 13]),
expected_field_names=['bin', 'int'],
expected_type_codes=[11, 13],
expected_type_code_values=logical_types)
# Bad type ids
arr = pa.UnionArray.from_dense(logical_types, value_offsets,
[binary, int64])
with pytest.raises(pa.ArrowInvalid):
arr.validate(full=True)
arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64],
type_codes=[11, 13])
with pytest.raises(pa.ArrowInvalid):
arr.validate(full=True)
# Offset larger than child size
bad_offsets = pa.array([0, 0, 1, 2, 1, 2, 4], type='int32')
arr = pa.UnionArray.from_dense(types, bad_offsets, [binary, int64])
with pytest.raises(pa.ArrowInvalid):
arr.validate(full=True)
def test_union_from_sparse():
binary = pa.array([b'a', b' ', b'b', b'c', b' ', b' ', b'd'],
type='binary')
int64 = pa.array([0, 1, 0, 0, 2, 3, 0], type='int64')
types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')
logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8')
py_value = [b'a', 1, b'b', b'c', 2, 3, b'd']
def check_result(result, expected_field_names, expected_type_codes,
expected_type_code_values):
result.validate(full=True)
assert result.to_pylist() == py_value
actual_field_names = [result.type[i].name
for i in range(result.type.num_fields)]
assert actual_field_names == expected_field_names
assert result.type.mode == "sparse"
assert result.type.type_codes == expected_type_codes
assert expected_type_code_values.equals(result.type_codes)
assert result.field(0).equals(binary)
assert result.field(1).equals(int64)
with pytest.raises(pa.ArrowTypeError):
result.offsets
with pytest.raises(KeyError):
result.field(-1)
with pytest.raises(KeyError):
result.field(2)
# without field names and type codes
check_result(pa.UnionArray.from_sparse(types, [binary, int64]),
expected_field_names=['0', '1'],
expected_type_codes=[0, 1],
expected_type_code_values=types)
# with field names
check_result(pa.UnionArray.from_sparse(types, [binary, int64],
['bin', 'int']),
expected_field_names=['bin', 'int'],
expected_type_codes=[0, 1],
expected_type_code_values=types)
# with type codes
check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64],
type_codes=[11, 13]),
expected_field_names=['0', '1'],
expected_type_codes=[11, 13],
expected_type_code_values=logical_types)
# with field names and type codes
check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64],
['bin', 'int'],
[11, 13]),
expected_field_names=['bin', 'int'],
expected_type_codes=[11, 13],
expected_type_code_values=logical_types)
# Bad type ids
arr = pa.UnionArray.from_sparse(logical_types, [binary, int64])
with pytest.raises(pa.ArrowInvalid):
arr.validate(full=True)
arr = pa.UnionArray.from_sparse(types, [binary, int64],
type_codes=[11, 13])
with pytest.raises(pa.ArrowInvalid):
arr.validate(full=True)
# Invalid child length
with pytest.raises(pa.ArrowInvalid):
arr = pa.UnionArray.from_sparse(logical_types, [binary, int64[1:]])
def test_union_array_slice():
# ARROW-2314
arr = pa.UnionArray.from_sparse(pa.array([0, 0, 1, 1], type=pa.int8()),
[pa.array(["a", "b", "c", "d"]),
pa.array([1, 2, 3, 4])])
assert arr[1:].to_pylist() == ["b", 3, 4]
binary = pa.array([b'a', b'b', b'c', b'd'], type='binary')
int64 = pa.array([1, 2, 3], type='int64')
types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')
value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32')
arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64])
lst = arr.to_pylist()
for i in range(len(arr)):
for j in range(i, len(arr)):
assert arr[i:j].to_pylist() == lst[i:j]
def _check_cast_case(case, *, safe=True, check_array_construction=True):
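    # case is (in_data, in_type, out_data, out_type); verify Array.cast and,
    # optionally, direct construction via pa.array(..., type=out_type)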
in_data, in_type, out_data, out_type = case
if isinstance(out_data, pa.Array):
assert out_data.type == out_type
expected = out_data
else:
expected = pa.array(out_data, type=out_type)
# check casting an already created array
if isinstance(in_data, pa.Array):
assert in_data.type == in_type
in_arr = in_data
else:
in_arr = pa.array(in_data, type=in_type)
casted = in_arr.cast(out_type, safe=safe)
casted.validate(full=True)
assert casted.equals(expected)
    # constructing an array with the out type, which optionally involves
    # casting; for more see ARROW-1949
if check_array_construction:
in_arr = pa.array(in_data, type=out_type, safe=safe)
assert in_arr.equals(expected)
def test_cast_integers_safe():
safe_cases = [
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='i4'), pa.int32()),
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='u4'), pa.uint16()),
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()),
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='f8'), pa.float64())
]
for case in safe_cases:
_check_cast_case(case)
unsafe_cases = [
(np.array([50000], dtype='i4'), 'int32', 'int16'),
(np.array([70000], dtype='i4'), 'int32', 'uint16'),
(np.array([-1], dtype='i4'), 'int32', 'uint16'),
(np.array([50000], dtype='u2'), 'uint16', 'int16')
]
for in_data, in_type, out_type in unsafe_cases:
in_arr = pa.array(in_data, type=in_type)
with pytest.raises(pa.ArrowInvalid):
in_arr.cast(out_type)
def test_cast_none():
# ARROW-3735: Ensure that calling cast(None) doesn't segfault.
arr = pa.array([1, 2, 3])
with pytest.raises(ValueError):
arr.cast(None)
def test_cast_list_to_primitive():
# ARROW-8070: cast segfaults on unsupported cast from list<binary> to utf8
arr = pa.array([[1, 2], [3, 4]])
with pytest.raises(NotImplementedError):
arr.cast(pa.int8())
arr = pa.array([[b"a", b"b"], [b"c"]], pa.list_(pa.binary()))
with pytest.raises(NotImplementedError):
arr.cast(pa.binary())
def test_slice_chunked_array_zero_chunks():
# ARROW-8911
arr = pa.chunked_array([], type='int8')
assert arr.num_chunks == 0
result = arr[:]
assert result.equals(arr)
# Do not crash
arr[:5]
def test_cast_chunked_array():
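    # casting a chunked array casts every chunk to the target type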
arrays = [pa.array([1, 2, 3]), pa.array([4, 5, 6])]
carr = pa.chunked_array(arrays)
target = pa.float64()
casted = carr.cast(target)
expected = pa.chunked_array([x.cast(target) for x in arrays])
assert casted.equals(expected)
def test_cast_chunked_array_empty():
# ARROW-8142
for typ1, typ2 in [(pa.dictionary(pa.int8(), pa.string()), pa.string()),
(pa.int64(), pa.int32())]:
arr = pa.chunked_array([], type=typ1)
result = arr.cast(typ2)
expected = pa.chunked_array([], type=typ2)
assert result.equals(expected)
def test_chunked_array_data_warns():
with pytest.warns(FutureWarning):
res = pa.chunked_array([[]]).data
assert isinstance(res, pa.ChunkedArray)
def test_cast_integers_unsafe():
# We let NumPy do the unsafe casting
unsafe_cases = [
(np.array([50000], dtype='i4'), 'int32',
np.array([50000], dtype='i2'), pa.int16()),
(np.array([70000], dtype='i4'), 'int32',
np.array([70000], dtype='u2'), pa.uint16()),
(np.array([-1], dtype='i4'), 'int32',
np.array([-1], dtype='u2'), pa.uint16()),
(np.array([50000], dtype='u2'), pa.uint16(),
np.array([50000], dtype='i2'), pa.int16())
]
for case in unsafe_cases:
_check_cast_case(case, safe=False)
def test_floating_point_truncate_safe():
safe_cases = [
(np.array([1.0, 2.0, 3.0], dtype='float32'), 'float32',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([1.0, 2.0, 3.0], dtype='float64'), 'float64',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([-10.0, 20.0, -30.0], dtype='float64'), 'float64',
np.array([-10, 20, -30], dtype='i4'), pa.int32()),
]
for case in safe_cases:
_check_cast_case(case, safe=True)
def test_floating_point_truncate_unsafe():
unsafe_cases = [
(np.array([1.1, 2.2, 3.3], dtype='float32'), 'float32',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([1.1, 2.2, 3.3], dtype='float64'), 'float64',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([-10.1, 20.2, -30.3], dtype='float64'), 'float64',
np.array([-10, 20, -30], dtype='i4'), pa.int32()),
]
for case in unsafe_cases:
# test safe casting raises
with pytest.raises(pa.ArrowInvalid, match='truncated'):
_check_cast_case(case, safe=True)
# test unsafe casting truncates
_check_cast_case(case, safe=False)
def test_decimal_to_int_safe():
safe_cases = [
(
[decimal.Decimal("123456"), None, decimal.Decimal("-912345")],
pa.decimal128(32, 5),
[123456, None, -912345],
pa.int32()
),
(
[decimal.Decimal("1234"), None, decimal.Decimal("-9123")],
pa.decimal128(19, 10),
[1234, None, -9123],
pa.int16()
),
(
[decimal.Decimal("123"), None, decimal.Decimal("-91")],
pa.decimal128(19, 10),
[123, None, -91],
pa.int8()
),
]
for case in safe_cases:
_check_cast_case(case)
_check_cast_case(case, safe=True)
def test_decimal_to_int_value_out_of_bounds():
out_of_bounds_cases = [
(
np.array([
decimal.Decimal("1234567890123"),
None,
decimal.Decimal("-912345678901234")
]),
pa.decimal128(32, 5),
[1912276171, None, -135950322],
pa.int32()
),
(
[decimal.Decimal("123456"), None, decimal.Decimal("-912345678")],
pa.decimal128(32, 5),
[-7616, None, -19022],
pa.int16()
),
(
[decimal.Decimal("1234"), None, decimal.Decimal("-9123")],
pa.decimal128(32, 5),
[-46, None, 93],
pa.int8()
),
]
for case in out_of_bounds_cases:
# test safe casting raises
with pytest.raises(pa.ArrowInvalid,
match='Integer value out of bounds'):
_check_cast_case(case)
# XXX `safe=False` can be ignored when constructing an array
# from a sequence of Python objects (ARROW-8567)
_check_cast_case(case, safe=False, check_array_construction=False)
def test_decimal_to_int_non_integer():
non_integer_cases = [
(
[
decimal.Decimal("123456.21"),
None,
decimal.Decimal("-912345.13")
],
pa.decimal128(32, 5),
[123456, None, -912345],
pa.int32()
),
(
[decimal.Decimal("1234.134"), None, decimal.Decimal("-9123.1")],
pa.decimal128(19, 10),
[1234, None, -9123],
pa.int16()
),
(
[decimal.Decimal("123.1451"), None, decimal.Decimal("-91.21")],
pa.decimal128(19, 10),
[123, None, -91],
pa.int8()
),
]
for case in non_integer_cases:
# test safe casting raises
msg_regexp = 'Rescaling decimal value would cause data loss'
with pytest.raises(pa.ArrowInvalid, match=msg_regexp):
_check_cast_case(case)
_check_cast_case(case, safe=False)
def test_decimal_to_decimal():
arr = pa.array(
[decimal.Decimal("1234.12"), None],
type=pa.decimal128(19, 10)
)
result = arr.cast(pa.decimal128(15, 6))
expected = pa.array(
[decimal.Decimal("1234.12"), None],
type=pa.decimal128(15, 6)
)
assert result.equals(expected)
with pytest.raises(pa.ArrowInvalid,
match='Rescaling decimal value would cause data loss'):
result = arr.cast(pa.decimal128(9, 1))
result = arr.cast(pa.decimal128(9, 1), safe=False)
expected = pa.array(
[decimal.Decimal("1234.1"), None],
type=pa.decimal128(9, 1)
)
assert result.equals(expected)
with pytest.raises(pa.ArrowInvalid,
match='Decimal value does not fit in precision'):
result = arr.cast(pa.decimal128(5, 2))
def test_safe_cast_nan_to_int_raises():
arr = pa.array([np.nan, 1.])
with pytest.raises(pa.ArrowInvalid, match='truncated'):
arr.cast(pa.int64(), safe=True)
def test_cast_signed_to_unsigned():
safe_cases = [
(np.array([0, 1, 2, 3], dtype='i1'), pa.uint8(),
np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()),
(np.array([0, 1, 2, 3], dtype='i2'), pa.uint16(),
np.array([0, 1, 2, 3], dtype='u2'), pa.uint16())
]
for case in safe_cases:
_check_cast_case(case)
def test_cast_from_null():
in_data = [None] * 3
in_type = pa.null()
out_types = [
pa.null(),
pa.uint8(),
pa.float16(),
pa.utf8(),
pa.binary(),
pa.binary(10),
pa.list_(pa.int16()),
pa.list_(pa.int32(), 4),
pa.large_list(pa.uint8()),
pa.decimal128(19, 4),
pa.timestamp('us'),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz='Europe/Paris'),
pa.duration('us'),
pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.list_(pa.int8())),
pa.field('c', pa.string())]),
]
for out_type in out_types:
_check_cast_case((in_data, in_type, in_data, out_type))
out_types = [
pa.dictionary(pa.int32(), pa.string()),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
]
in_arr = pa.array(in_data, type=pa.null())
for out_type in out_types:
with pytest.raises(NotImplementedError):
in_arr.cast(out_type)
def test_cast_string_to_number_roundtrip():
cases = [
(pa.array(["1", "127", "-128"]),
pa.array([1, 127, -128], type=pa.int8())),
(pa.array([None, "18446744073709551615"]),
pa.array([None, 18446744073709551615], type=pa.uint64())),
]
for in_arr, expected in cases:
casted = in_arr.cast(expected.type, safe=True)
casted.validate(full=True)
assert casted.equals(expected)
casted_back = casted.cast(in_arr.type, safe=True)
casted_back.validate(full=True)
assert casted_back.equals(in_arr)
def test_cast_dictionary():
arr = pa.DictionaryArray.from_arrays(
pa.array([0, 1, None], type=pa.int32()),
pa.array(["foo", "bar"]))
assert arr.cast(pa.string()).equals(pa.array(["foo", "bar", None]))
with pytest.raises(pa.ArrowInvalid):
# Shouldn't crash (ARROW-7077)
arr.cast(pa.int32())
def test_view():
# ARROW-5992
arr = pa.array(['foo', 'bar', 'baz'], type=pa.utf8())
expected = pa.array(['foo', 'bar', 'baz'], type=pa.binary())
assert arr.view(pa.binary()).equals(expected)
assert arr.view('binary').equals(expected)
def test_unique_simple():
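    # unique() works on plain and chunked arrays and preserves nulls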
cases = [
(pa.array([1, 2, 3, 1, 2, 3]), pa.array([1, 2, 3])),
(pa.array(['foo', None, 'bar', 'foo']),
pa.array(['foo', None, 'bar'])),
(pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()),
pa.array(['foo', None, 'bar'], pa.large_binary())),
]
for arr, expected in cases:
result = arr.unique()
assert result.equals(expected)
result = pa.chunked_array([arr]).unique()
assert result.equals(expected)
def test_value_counts_simple():
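    # value_counts() returns a struct array with "values" and "counts" fields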
cases = [
(pa.array([1, 2, 3, 1, 2, 3]),
pa.array([1, 2, 3]),
pa.array([2, 2, 2], type=pa.int64())),
(pa.array(['foo', None, 'bar', 'foo']),
pa.array(['foo', None, 'bar']),
pa.array([2, 1, 1], type=pa.int64())),
(pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()),
pa.array(['foo', None, 'bar'], pa.large_binary()),
pa.array([2, 1, 1], type=pa.int64())),
]
for arr, expected_values, expected_counts in cases:
for arr_in in (arr, pa.chunked_array([arr])):
result = arr_in.value_counts()
assert result.type.equals(
pa.struct([pa.field("values", arr.type),
pa.field("counts", pa.int64())]))
assert result.field("values").equals(expected_values)
assert result.field("counts").equals(expected_counts)
def test_unique_value_counts_dictionary_type():
indices = pa.array([3, 0, 0, 0, 1, 1, 3, 0, 1, 3, 0, 1])
dictionary = pa.array(['foo', 'bar', 'baz', 'qux'])
arr = pa.DictionaryArray.from_arrays(indices, dictionary)
unique_result = arr.unique()
expected = pa.DictionaryArray.from_arrays(indices.unique(), dictionary)
assert unique_result.equals(expected)
result = arr.value_counts()
    assert result.field('values').equals(unique_result)
    assert result.field('counts').equals(pa.array([3, 5, 4], type='int64'))
def test_dictionary_encode_simple():
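    # dictionary_encode() produces int32 indices into the distinct values,
    # for both plain and chunked arrays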
cases = [
(pa.array([1, 2, 3, None, 1, 2, 3]),
pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, None, 0, 1, 2], type='int32'),
[1, 2, 3])),
(pa.array(['foo', None, 'bar', 'foo']),
pa.DictionaryArray.from_arrays(
pa.array([0, None, 1, 0], type='int32'),
['foo', 'bar'])),
(pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()),
pa.DictionaryArray.from_arrays(
pa.array([0, None, 1, 0], type='int32'),
pa.array(['foo', 'bar'], type=pa.large_binary()))),
]
for arr, expected in cases:
result = arr.dictionary_encode()
assert result.equals(expected)
result = pa.chunked_array([arr]).dictionary_encode()
assert result.num_chunks == 1
assert result.chunk(0).equals(expected)
result = pa.chunked_array([], type=arr.type).dictionary_encode()
assert result.num_chunks == 0
assert result.type == expected.type
def test_dictionary_encode_sliced():
cases = [
(pa.array([1, 2, 3, None, 1, 2, 3])[1:-1],
pa.DictionaryArray.from_arrays(
pa.array([0, 1, None, 2, 0], type='int32'),
[2, 3, 1])),
(pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'])[1:-1],
pa.DictionaryArray.from_arrays(
pa.array([0, 1, 0], type='int32'),
['foo', 'bar'])),
(pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'],
type=pa.large_string())[1:-1],
pa.DictionaryArray.from_arrays(
pa.array([0, 1, 0], type='int32'),
pa.array(['foo', 'bar'], type=pa.large_string()))),
]
for arr, expected in cases:
result = arr.dictionary_encode()
assert result.equals(expected)
result = pa.chunked_array([arr]).dictionary_encode()
assert result.num_chunks == 1
assert result.type == expected.type
assert result.chunk(0).equals(expected)
result = pa.chunked_array([], type=arr.type).dictionary_encode()
assert result.num_chunks == 0
assert result.type == expected.type
# ARROW-9143 dictionary_encode after slice was segfaulting
array = pa.array(['foo', 'bar', 'baz'])
array.slice(1).dictionary_encode()
def test_dictionary_encode_zero_length():
# User-facing experience of ARROW-7008
arr = pa.array([], type=pa.string())
encoded = arr.dictionary_encode()
assert len(encoded.dictionary) == 0
encoded.validate(full=True)
def test_cast_time32_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int32'),
type=pa.time32('s'))
expected = pa.array([0, 1, 2], type='i4')
result = arr.cast('i4')
assert result.equals(expected)
def test_cast_time64_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.time64('us'))
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
def test_cast_timestamp_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.timestamp('us'))
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
def test_cast_date32_to_int():
arr = pa.array([0, 1, 2], type='i4')
result1 = arr.cast('date32')
result2 = result1.cast('i4')
expected1 = pa.array([
datetime.date(1970, 1, 1),
datetime.date(1970, 1, 2),
datetime.date(1970, 1, 3)
]).cast('date32')
assert result1.equals(expected1)
assert result2.equals(arr)
def test_cast_duration_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.duration('us'))
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
def test_cast_binary_to_utf8():
binary_arr = pa.array([b'foo', b'bar', b'baz'], type=pa.binary())
utf8_arr = binary_arr.cast(pa.utf8())
expected = pa.array(['foo', 'bar', 'baz'], type=pa.utf8())
assert utf8_arr.equals(expected)
non_utf8_values = [('mañana').encode('utf-16-le')]
non_utf8_binary = pa.array(non_utf8_values)
assert non_utf8_binary.type == pa.binary()
with pytest.raises(ValueError):
non_utf8_binary.cast(pa.string())
non_utf8_all_null = pa.array(non_utf8_values, mask=np.array([True]),
type=pa.binary())
# No error
casted = non_utf8_all_null.cast(pa.string())
assert casted.null_count == 1
def test_cast_date64_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.date64())
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
def test_date64_from_builtin_datetime():
val1 = datetime.datetime(2000, 1, 1, 12, 34, 56, 123456)
val2 = datetime.datetime(2000, 1, 1)
result = pa.array([val1, val2], type='date64')
result2 = pa.array([val1.date(), val2.date()], type='date64')
assert result.equals(result2)
as_i8 = result.view('int64')
assert as_i8[0].as_py() == as_i8[1].as_py()
@pytest.mark.parametrize(('ty', 'values'), [
('bool', [True, False, True]),
('uint8', range(0, 255)),
('int8', range(0, 128)),
('uint16', range(0, 10)),
('int16', range(0, 10)),
('uint32', range(0, 10)),
('int32', range(0, 10)),
('uint64', range(0, 10)),
('int64', range(0, 10)),
('float', [0.0, 0.1, 0.2]),
('double', [0.0, 0.1, 0.2]),
('string', ['a', 'b', 'c']),
('binary', [b'a', b'b', b'c']),
(pa.binary(3), [b'abc', b'bcd', b'cde'])
])
def test_cast_identities(ty, values):
arr = pa.array(values, type=ty)
assert arr.cast(ty).equals(arr)
pickle_test_parametrize = pytest.mark.parametrize(
('data', 'typ'),
[
([True, False, True, True], pa.bool_()),
([1, 2, 4, 6], pa.int64()),
([1.0, 2.5, None], pa.float64()),
(['a', None, 'b'], pa.string()),
([], None),
([[1, 2], [3]], pa.list_(pa.int64())),
([[4, 5], [6]], pa.large_list(pa.int16())),
([['a'], None, ['b', 'c']], pa.list_(pa.string())),
([(1, 'a'), (2, 'c'), None],
pa.struct([pa.field('a', pa.int64()), pa.field('b', pa.string())]))
]
)
@pickle_test_parametrize
def test_array_pickle(data, typ):
    # Allocate inside the test so that no Arrow data is allocated beforehand.
    # This is needed to ensure that allocator tests can be reliable.
array = pa.array(data, type=typ)
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
result = pickle.loads(pickle.dumps(array, proto))
assert array.equals(result)
def test_array_pickle_dictionary():
# not included in the above as dictionary array cannot be created with
# the pa.array function
array = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1], ['a', 'b', 'c'])
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
result = pickle.loads(pickle.dumps(array, proto))
assert array.equals(result)
@h.given(
past.arrays(
past.all_types,
size=st.integers(min_value=0, max_value=10)
)
)
def test_pickling(arr):
data = pickle.dumps(arr)
restored = pickle.loads(data)
assert arr.equals(restored)
@pickle_test_parametrize
def test_array_pickle5(data, typ):
# Test zero-copy pickling with protocol 5 (PEP 574)
picklemod = pickle5 or pickle
if pickle5 is None and picklemod.HIGHEST_PROTOCOL < 5:
pytest.skip("need pickle5 package or Python 3.8+")
array = pa.array(data, type=typ)
addresses = [buf.address if buf is not None else 0
for buf in array.buffers()]
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
pickled = picklemod.dumps(array, proto, buffer_callback=buffers.append)
result = picklemod.loads(pickled, buffers=buffers)
assert array.equals(result)
result_addresses = [buf.address if buf is not None else 0
for buf in result.buffers()]
assert result_addresses == addresses
@pytest.mark.parametrize(
'narr',
[
np.arange(10, dtype=np.int64),
np.arange(10, dtype=np.int32),
np.arange(10, dtype=np.int16),
np.arange(10, dtype=np.int8),
np.arange(10, dtype=np.uint64),
np.arange(10, dtype=np.uint32),
np.arange(10, dtype=np.uint16),
np.arange(10, dtype=np.uint8),
np.arange(10, dtype=np.float64),
np.arange(10, dtype=np.float32),
np.arange(10, dtype=np.float16),
]
)
def test_to_numpy_roundtrip(narr):
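    # to_numpy() preserves dtype and values, including for sliced arrays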
arr = pa.array(narr)
assert narr.dtype == arr.to_numpy().dtype
np.testing.assert_array_equal(narr, arr.to_numpy())
np.testing.assert_array_equal(narr[:6], arr[:6].to_numpy())
np.testing.assert_array_equal(narr[2:], arr[2:].to_numpy())
np.testing.assert_array_equal(narr[2:6], arr[2:6].to_numpy())
def test_array_uint64_from_py_over_range():
arr = pa.array([2 ** 63], type=pa.uint64())
expected = pa.array(np.array([2 ** 63], dtype='u8'))
assert arr.equals(expected)
def test_array_conversions_no_sentinel_values():
arr = np.array([1, 2, 3, 4], dtype='int8')
refcount = sys.getrefcount(arr)
arr2 = pa.array(arr) # noqa
assert sys.getrefcount(arr) == (refcount + 1)
assert arr2.type == 'int8'
arr3 = pa.array(np.array([1, np.nan, 2, 3, np.nan, 4], dtype='float32'),
type='float32')
assert arr3.type == 'float32'
assert arr3.null_count == 0
def test_time32_time64_from_integer():
# ARROW-4111
result = pa.array([1, 2, None], type=pa.time32('s'))
expected = pa.array([datetime.time(second=1),
datetime.time(second=2), None],
type=pa.time32('s'))
assert result.equals(expected)
result = pa.array([1, 2, None], type=pa.time32('ms'))
expected = pa.array([datetime.time(microsecond=1000),
datetime.time(microsecond=2000), None],
type=pa.time32('ms'))
assert result.equals(expected)
result = pa.array([1, 2, None], type=pa.time64('us'))
expected = pa.array([datetime.time(microsecond=1),
datetime.time(microsecond=2), None],
type=pa.time64('us'))
assert result.equals(expected)
result = pa.array([1000, 2000, None], type=pa.time64('ns'))
expected = pa.array([datetime.time(microsecond=1),
datetime.time(microsecond=2), None],
type=pa.time64('ns'))
assert result.equals(expected)
def test_binary_string_pandas_null_sentinels():
# ARROW-6227
def _check_case(ty):
arr = pa.array(['string', np.nan], type=ty, from_pandas=True)
expected = pa.array(['string', None], type=ty)
assert arr.equals(expected)
_check_case('binary')
_check_case('utf8')
def test_pandas_null_sentinels_raise_error():
# ARROW-6227
cases = [
([None, np.nan], 'null'),
(['string', np.nan], 'binary'),
(['string', np.nan], 'utf8'),
(['string', np.nan], 'large_binary'),
(['string', np.nan], 'large_utf8'),
([b'string', np.nan], pa.binary(6)),
([True, np.nan], pa.bool_()),
([decimal.Decimal('0'), np.nan], pa.decimal128(12, 2)),
([0, np.nan], pa.date32()),
([0, np.nan], pa.date64()),
([0, np.nan], pa.time32('s')),
([0, np.nan], pa.time64('us')),
([0, np.nan], pa.timestamp('us')),
([0, np.nan], pa.duration('us')),
]
for case, ty in cases:
# Both types of exceptions are raised. May want to clean that up
with pytest.raises((ValueError, TypeError)):
pa.array(case, type=ty)
# from_pandas option suppresses failure
result = pa.array(case, type=ty, from_pandas=True)
assert result.null_count == (1 if ty != 'null' else 2)
@pytest.mark.pandas
def test_pandas_null_sentinels_index():
# ARROW-7023 - ensure that when passing a pandas Index, "from_pandas"
# semantics are used
import pandas as pd
idx = pd.Index([1, 2, np.nan], dtype=object)
result = pa.array(idx)
expected = pa.array([1, 2, np.nan], from_pandas=True)
assert result.equals(expected)
def test_array_from_numpy_datetimeD():
arr = np.array([None, datetime.date(2017, 4, 4)], dtype='datetime64[D]')
result = pa.array(arr)
expected = pa.array([None, datetime.date(2017, 4, 4)], type=pa.date32())
assert result.equals(expected)
@pytest.mark.parametrize(('dtype', 'type'), [
('datetime64[s]', pa.timestamp('s')),
('datetime64[ms]', pa.timestamp('ms')),
('datetime64[us]', pa.timestamp('us')),
('datetime64[ns]', pa.timestamp('ns'))
])
def test_array_from_numpy_datetime(dtype, type):
data = [
None,
datetime.datetime(2017, 4, 4, 12, 11, 10),
datetime.datetime(2018, 1, 1, 0, 2, 0)
]
# from numpy array
arr = pa.array(np.array(data, dtype=dtype))
expected = pa.array(data, type=type)
assert arr.equals(expected)
# from list of numpy scalars
arr = pa.array(list(np.array(data, dtype=dtype)))
assert arr.equals(expected)
def test_array_from_different_numpy_datetime_units_raises():
data = [
None,
datetime.datetime(2017, 4, 4, 12, 11, 10),
datetime.datetime(2018, 1, 1, 0, 2, 0)
]
s = np.array(data, dtype='datetime64[s]')
ms = np.array(data, dtype='datetime64[ms]')
data = list(s[:2]) + list(ms[2:])
with pytest.raises(pa.ArrowNotImplementedError):
pa.array(data)
@pytest.mark.parametrize('unit', ['ns', 'us', 'ms', 's'])
def test_array_from_list_of_timestamps(unit):
n = np.datetime64('NaT', unit)
x = np.datetime64('2017-01-01 01:01:01.111111111', unit)
y = np.datetime64('2018-11-22 12:24:48.111111111', unit)
a1 = pa.array([n, x, y])
a2 = pa.array([n, x, y], type=pa.timestamp(unit))
assert a1.type == a2.type
assert a1.type.unit == unit
assert a1[0] == a2[0]
def test_array_from_timestamp_with_generic_unit():
n = np.datetime64('NaT')
x = np.datetime64('2017-01-01 01:01:01.111111111')
y = np.datetime64('2018-11-22 12:24:48.111111111')
with pytest.raises(pa.ArrowNotImplementedError,
match='Unbound or generic datetime64 time unit'):
pa.array([n, x, y])
@pytest.mark.parametrize(('dtype', 'type'), [
('timedelta64[s]', pa.duration('s')),
('timedelta64[ms]', pa.duration('ms')),
('timedelta64[us]', pa.duration('us')),
('timedelta64[ns]', pa.duration('ns'))
])
def test_array_from_numpy_timedelta(dtype, type):
data = [
None,
datetime.timedelta(1),
datetime.timedelta(0, 1)
]
# from numpy array
np_arr = np.array(data, dtype=dtype)
arr = pa.array(np_arr)
assert isinstance(arr, pa.DurationArray)
assert arr.type == type
expected = pa.array(data, type=type)
assert arr.equals(expected)
assert arr.to_pylist() == data
# from list of numpy scalars
arr = pa.array(list(np.array(data, dtype=dtype)))
assert arr.equals(expected)
assert arr.to_pylist() == data
def test_array_from_numpy_timedelta_incorrect_unit():
# generic (no unit)
td = np.timedelta64(1)
for data in [[td], np.array([td])]:
with pytest.raises(NotImplementedError):
pa.array(data)
# unsupported unit
td = np.timedelta64(1, 'M')
for data in [[td], np.array([td])]:
with pytest.raises(NotImplementedError):
pa.array(data)
def test_array_from_numpy_ascii():
arr = np.array(['abcde', 'abc', ''], dtype='|S5')
arrow_arr = pa.array(arr)
assert arrow_arr.type == 'binary'
expected = pa.array(['abcde', 'abc', ''], type='binary')
assert arrow_arr.equals(expected)
mask = np.array([False, True, False])
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', None, ''], type='binary')
assert arrow_arr.equals(expected)
# Strided variant
arr = np.array(['abcde', 'abc', ''] * 5, dtype='|S5')[::2]
mask = np.array([False, True, False] * 5)[::2]
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''],
type='binary')
assert arrow_arr.equals(expected)
# 0 itemsize
arr = np.array(['', '', ''], dtype='|S0')
arrow_arr = pa.array(arr)
expected = pa.array(['', '', ''], type='binary')
assert arrow_arr.equals(expected)
def test_array_from_numpy_unicode():
dtypes = ['<U5', '>U5']
for dtype in dtypes:
arr = np.array(['abcde', 'abc', ''], dtype=dtype)
arrow_arr = pa.array(arr)
assert arrow_arr.type == 'utf8'
expected = pa.array(['abcde', 'abc', ''], type='utf8')
assert arrow_arr.equals(expected)
mask = np.array([False, True, False])
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', None, ''], type='utf8')
assert arrow_arr.equals(expected)
# Strided variant
arr = np.array(['abcde', 'abc', ''] * 5, dtype=dtype)[::2]
mask = np.array([False, True, False] * 5)[::2]
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', '', None, 'abcde', '', None,
'abcde', ''], type='utf8')
assert arrow_arr.equals(expected)
# 0 itemsize
arr = np.array(['', '', ''], dtype='<U0')
arrow_arr = pa.array(arr)
expected = pa.array(['', '', ''], type='utf8')
assert arrow_arr.equals(expected)
def test_array_string_from_non_string():
    # ARROW-5682 - when converting to string, raise on a non-string-like dtype
with pytest.raises(TypeError):
pa.array(np.array([1, 2, 3]), type=pa.string())
def test_array_string_from_all_null():
# ARROW-5682
vals = np.array([None, None], dtype=object)
arr = pa.array(vals, type=pa.string())
assert arr.null_count == 2
vals = np.array([np.nan, np.nan], dtype='float64')
# by default raises, but accept as all-null when from_pandas=True
with pytest.raises(TypeError):
pa.array(vals, type=pa.string())
arr = pa.array(vals, type=pa.string(), from_pandas=True)
assert arr.null_count == 2
def test_array_from_masked():
ma = np.ma.array([1, 2, 3, 4], dtype='int64',
mask=[False, False, True, False])
result = pa.array(ma)
expected = pa.array([1, 2, None, 4], type='int64')
assert expected.equals(result)
with pytest.raises(ValueError, match="Cannot pass a numpy masked array"):
pa.array(ma, mask=np.array([True, False, False, False]))
def test_array_from_shrunken_masked():
ma = np.ma.array([0], dtype='int64')
result = pa.array(ma)
expected = pa.array([0], type='int64')
assert expected.equals(result)
def test_array_from_invalid_dim_raises():
msg = "only handle 1-dimensional arrays"
arr2d = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=msg):
pa.array(arr2d)
arr0d = np.array(0)
with pytest.raises(ValueError, match=msg):
pa.array(arr0d)
def test_array_from_strided_bool():
# ARROW-6325
arr = np.ones((3, 2), dtype=bool)
result = pa.array(arr[:, 0])
expected = pa.array([True, True, True])
assert result.equals(expected)
result = pa.array(arr[0, :])
expected = pa.array([True, True])
assert result.equals(expected)
def test_boolean_true_count_false_count():
# ARROW-9145
arr = pa.array([True, True, None, False, None, True] * 1000)
assert arr.true_count == 3000
assert arr.false_count == 1000
def test_buffers_primitive():
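    # buffers() exposes the underlying validity bitmap and value buffers
    # (plus offsets for binary arrays)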
a = pa.array([1, 2, None, 4], type=pa.int16())
buffers = a.buffers()
assert len(buffers) == 2
null_bitmap = buffers[0].to_pybytes()
assert 1 <= len(null_bitmap) <= 64 # XXX this is varying
assert bytearray(null_bitmap)[0] == 0b00001011
# Slicing does not affect the buffers but the offset
a_sliced = a[1:]
buffers = a_sliced.buffers()
    assert a_sliced.offset == 1
assert len(buffers) == 2
null_bitmap = buffers[0].to_pybytes()
assert 1 <= len(null_bitmap) <= 64 # XXX this is varying
assert bytearray(null_bitmap)[0] == 0b00001011
assert struct.unpack('hhxxh', buffers[1].to_pybytes()) == (1, 2, 4)
a = pa.array(np.int8([4, 5, 6]))
buffers = a.buffers()
assert len(buffers) == 2
# No null bitmap from Numpy int array
assert buffers[0] is None
assert struct.unpack('3b', buffers[1].to_pybytes()) == (4, 5, 6)
a = pa.array([b'foo!', None, b'bar!!'])
buffers = a.buffers()
assert len(buffers) == 3
null_bitmap = buffers[0].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000101
offsets = buffers[1].to_pybytes()
assert struct.unpack('4i', offsets) == (0, 4, 4, 9)
values = buffers[2].to_pybytes()
assert values == b'foo!bar!!'
def test_buffers_nested():
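    # nested (list/struct) arrays expose parent buffers followed by the
    # child buffers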
a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64()))
buffers = a.buffers()
assert len(buffers) == 4
# The parent buffers
null_bitmap = buffers[0].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000101
offsets = buffers[1].to_pybytes()
assert struct.unpack('4i', offsets) == (0, 2, 2, 6)
# The child buffers
null_bitmap = buffers[2].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00110111
values = buffers[3].to_pybytes()
assert struct.unpack('qqq8xqq', values) == (1, 2, 3, 4, 5)
a = pa.array([(42, None), None, (None, 43)],
type=pa.struct([pa.field('a', pa.int8()),
pa.field('b', pa.int16())]))
buffers = a.buffers()
assert len(buffers) == 5
# The parent buffer
null_bitmap = buffers[0].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000101
# The child buffers: 'a'
null_bitmap = buffers[1].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000001
values = buffers[2].to_pybytes()
assert struct.unpack('bxx', values) == (42,)
# The child buffers: 'b'
null_bitmap = buffers[3].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000100
values = buffers[4].to_pybytes()
assert struct.unpack('4xh', values) == (43,)
def test_nbytes_sizeof():
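    # nbytes sums the validity, offsets and data buffers; getsizeof
    # additionally counts the Python object overhead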
a = pa.array(np.array([4, 5, 6], dtype='int64'))
assert a.nbytes == 8 * 3
assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes
a = pa.array([1, None, 3], type='int64')
assert a.nbytes == 8*3 + 1
assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes
a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64()))
assert a.nbytes == 1 + 4 * 4 + 1 + 6 * 8
assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes
def test_invalid_tensor_constructor_repr():
# ARROW-2638: prevent calling extension class constructors directly
with pytest.raises(TypeError):
repr(pa.Tensor([1]))
def test_invalid_tensor_construction():
with pytest.raises(TypeError):
pa.Tensor()
@pytest.mark.parametrize(('offset_type', 'list_type_factory'),
[(pa.int32(), pa.list_), (pa.int64(), pa.large_list)])
def test_list_array_flatten(offset_type, list_type_factory):
typ2 = list_type_factory(
list_type_factory(
pa.int64()
)
)
arr2 = pa.array([
None,
[
[1, None, 2],
None,
[3, 4]
],
[],
[
[],
[5, 6],
None
],
[
[7, 8]
]
], type=typ2)
offsets2 = pa.array([0, 0, 3, 3, 6, 7], type=offset_type)
typ1 = list_type_factory(pa.int64())
arr1 = pa.array([
[1, None, 2],
None,
[3, 4],
[],
[5, 6],
None,
[7, 8]
], type=typ1)
offsets1 = pa.array([0, 3, 3, 5, 5, 7, 7, 9], type=offset_type)
arr0 = pa.array([
1, None, 2,
3, 4,
5, 6,
7, 8
], type=pa.int64())
assert arr2.flatten().equals(arr1)
assert arr2.offsets.equals(offsets2)
assert arr2.values.equals(arr1)
assert arr1.flatten().equals(arr0)
assert arr1.offsets.equals(offsets1)
assert arr1.values.equals(arr0)
assert arr2.flatten().flatten().equals(arr0)
assert arr2.values.values.equals(arr0)
@pytest.mark.parametrize(('offset_type', 'list_type_factory'),
[(pa.int32(), pa.list_), (pa.int64(), pa.large_list)])
def test_list_value_parent_indices(offset_type, list_type_factory):
arr = pa.array(
[
[0, 1, 2],
None,
[],
[3, 4]
], type=list_type_factory(pa.int32()))
expected = pa.array([0, 0, 0, 3, 3], type=offset_type)
assert arr.value_parent_indices().equals(expected)
@pytest.mark.parametrize(('offset_type', 'list_type_factory'),
[(pa.int32(), pa.list_), (pa.int64(), pa.large_list)])
def test_list_value_lengths(offset_type, list_type_factory):
arr = pa.array(
[
[0, 1, 2],
None,
[],
[3, 4]
], type=list_type_factory(pa.int32()))
expected = pa.array([3, None, 0, 2], type=offset_type)
assert arr.value_lengths().equals(expected)
@pytest.mark.parametrize('list_type_factory', [pa.list_, pa.large_list])
def test_list_array_flatten_non_canonical(list_type_factory):
# Non-canonical list array (null elements backed by non-empty sublists)
typ = list_type_factory(pa.int64())
arr = pa.array([[1], [2, 3], [4, 5, 6]], type=typ)
buffers = arr.buffers()[:2]
buffers[0] = pa.py_buffer(b"\x05") # validity bitmap
arr = arr.from_buffers(arr.type, len(arr), buffers, children=[arr.values])
assert arr.to_pylist() == [[1], None, [4, 5, 6]]
assert arr.offsets.to_pylist() == [0, 1, 3, 6]
flattened = arr.flatten()
flattened.validate(full=True)
assert flattened.type == typ.value_type
assert flattened.to_pylist() == [1, 4, 5, 6]
# .values is the physical values array (including masked elements)
assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6]
@pytest.mark.parametrize('klass', [pa.ListArray, pa.LargeListArray])
def test_list_array_values_offsets_sliced(klass):
# ARROW-7301
arr = klass.from_arrays(offsets=[0, 3, 4, 6], values=[1, 2, 3, 4, 5, 6])
assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6]
assert arr.offsets.to_pylist() == [0, 3, 4, 6]
# sliced -> values keeps referring to full values buffer, but offsets is
# sliced as well so the offsets correctly point into the full values array
# sliced -> flatten() will return the sliced value array.
arr2 = arr[1:]
assert arr2.values.to_pylist() == [1, 2, 3, 4, 5, 6]
assert arr2.offsets.to_pylist() == [3, 4, 6]
assert arr2.flatten().to_pylist() == [4, 5, 6]
i = arr2.offsets[0].as_py()
j = arr2.offsets[1].as_py()
assert arr2[0].as_py() == arr2.values[i:j].to_pylist() == [4]
def test_fixed_size_list_array_flatten():
typ2 = pa.list_(pa.list_(pa.int64(), 2), 3)
arr2 = pa.array([
[
[1, 2],
[3, 4],
[5, 6],
],
None,
[
[7, None],
None,
[8, 9]
],
], type=typ2)
assert arr2.type.equals(typ2)
typ1 = pa.list_(pa.int64(), 2)
arr1 = pa.array([
[1, 2], [3, 4], [5, 6],
None, None, None,
[7, None], None, [8, 9]
], type=typ1)
assert arr1.type.equals(typ1)
assert arr2.flatten().equals(arr1)
typ0 = pa.int64()
arr0 = pa.array([
1, 2, 3, 4, 5, 6,
None, None, None, None, None, None,
7, None, None, None, 8, 9,
], type=typ0)
assert arr0.type.equals(typ0)
assert arr1.flatten().equals(arr0)
assert arr2.flatten().flatten().equals(arr0)
def test_struct_array_flatten():
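    # flatten() splits a struct array into one child array per field, also
    # for sliced and null-containing inputs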
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
xs, ys = a.flatten()
assert xs.type == pa.int16()
assert ys.type == pa.float32()
assert xs.to_pylist() == [1, 3, 5]
assert ys.to_pylist() == [2.5, 4.5, 6.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [3, 5]
assert ys.to_pylist() == [4.5, 6.5]
a = pa.array([(1, 2.5), None, (3, 4.5)], type=ty)
xs, ys = a.flatten()
assert xs.to_pylist() == [1, None, 3]
assert ys.to_pylist() == [2.5, None, 4.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [None, 3]
assert ys.to_pylist() == [None, 4.5]
a = pa.array([(1, None), (2, 3.5), (None, 4.5)], type=ty)
xs, ys = a.flatten()
assert xs.to_pylist() == [1, 2, None]
assert ys.to_pylist() == [None, 3.5, 4.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [2, None]
assert ys.to_pylist() == [3.5, 4.5]
a = pa.array([(1, None), None, (None, 2.5)], type=ty)
xs, ys = a.flatten()
assert xs.to_pylist() == [1, None, None]
assert ys.to_pylist() == [None, None, 2.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [None, None]
assert ys.to_pylist() == [None, 2.5]
def test_struct_array_field():
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
x0 = a.field(0)
y0 = a.field(1)
x1 = a.field(-2)
y1 = a.field(-1)
x2 = a.field('x')
y2 = a.field('y')
assert isinstance(x0, pa.lib.Int16Array)
assert isinstance(y1, pa.lib.FloatArray)
assert x0.equals(pa.array([1, 3, 5], type=pa.int16()))
assert y0.equals(pa.array([2.5, 4.5, 6.5], type=pa.float32()))
assert x0.equals(x1)
assert x0.equals(x2)
assert y0.equals(y1)
assert y0.equals(y2)
for invalid_index in [None, pa.int16()]:
with pytest.raises(TypeError):
a.field(invalid_index)
for invalid_index in [3, -3]:
with pytest.raises(IndexError):
a.field(invalid_index)
for invalid_name in ['z', '']:
with pytest.raises(KeyError):
a.field(invalid_name)
def test_empty_cast():
types = [
pa.null(),
pa.bool_(),
pa.int8(),
pa.int16(),
pa.int32(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint32(),
pa.uint64(),
pa.float16(),
pa.float32(),
pa.float64(),
pa.date32(),
pa.date64(),
pa.binary(),
pa.binary(length=4),
pa.string(),
]
for (t1, t2) in itertools.product(types, types):
try:
# ARROW-4766: Ensure that supported types conversion don't segfault
# on empty arrays of common types
pa.array([], type=t1).cast(t2)
except (pa.lib.ArrowNotImplementedError, pa.ArrowInvalid):
continue
def test_nested_dictionary_array():
dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b'])
list_arr = pa.ListArray.from_arrays([0, 2, 3], dict_arr)
assert list_arr.to_pylist() == [['a', 'b'], ['a']]
dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b'])
dict_arr2 = pa.DictionaryArray.from_arrays([0, 1, 2, 1, 0], dict_arr)
assert dict_arr2.to_pylist() == ['a', 'b', 'a', 'b', 'a']
def test_array_from_numpy_str_utf8():
# ARROW-3890 -- in Python 3, NPY_UNICODE arrays are produced, but in Python
# 2 they are NPY_STRING (binary), so we must do UTF-8 validation
vec = np.array(["toto", "tata"])
vec2 = np.array(["toto", "tata"], dtype=object)
arr = pa.array(vec, pa.string())
arr2 = pa.array(vec2, pa.string())
expected = pa.array(["toto", "tata"])
assert arr.equals(expected)
assert arr2.equals(expected)
# with mask, separate code path
mask = np.array([False, False], dtype=bool)
arr = pa.array(vec, pa.string(), mask=mask)
assert arr.equals(expected)
# UTF8 validation failures
vec = np.array([('mañana').encode('utf-16-le')])
with pytest.raises(ValueError):
pa.array(vec, pa.string())
with pytest.raises(ValueError):
pa.array(vec, pa.string(), mask=np.array([False]))
@pytest.mark.large_memory
def test_numpy_binary_overflow_to_chunked():
# ARROW-3762, ARROW-5966
# 2^31 + 1 bytes
values = [b'x']
unicode_values = ['x']
# Make 10 unique 1MB strings, then repeat them 2048 times
unique_strings = {
i: b'x' * ((1 << 20) - 1) + str(i % 10).encode('utf8')
for i in range(10)
}
unicode_unique_strings = {i: x.decode('utf8')
for i, x in unique_strings.items()}
values += [unique_strings[i % 10] for i in range(1 << 11)]
unicode_values += [unicode_unique_strings[i % 10]
for i in range(1 << 11)]
for case, ex_type in [(values, pa.binary()),
(unicode_values, pa.utf8())]:
arr = np.array(case)
arrow_arr = pa.array(arr)
arr = None
assert isinstance(arrow_arr, pa.ChunkedArray)
assert arrow_arr.type == ex_type
# Split up into 16MB chunks. 128 * 16 = 2048, so 129
assert arrow_arr.num_chunks == 129
value_index = 0
for i in range(arrow_arr.num_chunks):
chunk = arrow_arr.chunk(i)
for val in chunk:
assert val.as_py() == case[value_index]
value_index += 1
@pytest.mark.large_memory
def test_list_child_overflow_to_chunked():
vals = [['x' * 1024]] * ((2 << 20) + 1)
with pytest.raises(ValueError, match="overflowed"):
pa.array(vals)
def test_infer_type_masked():
# ARROW-5208
ty = pa.infer_type(['foo', 'bar', None, 2],
mask=[False, False, False, True])
assert ty == pa.utf8()
# all masked
ty = pa.infer_type(['foo', 'bar', None, 2],
mask=np.array([True, True, True, True]))
assert ty == pa.null()
# length 0
assert pa.infer_type([], mask=[]) == pa.null()
def test_array_masked():
# ARROW-5208
arr = pa.array([4, None, 4, 3.],
mask=np.array([False, True, False, True]))
assert arr.type == pa.int64()
# ndarray dtype=object argument
arr = pa.array(np.array([4, None, 4, 3.], dtype="O"),
mask=np.array([False, True, False, True]))
assert arr.type == pa.int64()
def test_array_from_large_pyints():
# ARROW-5430
with pytest.raises(OverflowError):
# too large for int64 so dtype must be explicitly provided
pa.array([int(2 ** 63)])
def test_array_protocol():
class MyArray:
def __init__(self, data):
self.data = data
def __arrow_array__(self, type=None):
return pa.array(self.data, type=type)
arr = MyArray(np.array([1, 2, 3], dtype='int64'))
result = pa.array(arr)
expected = pa.array([1, 2, 3], type=pa.int64())
assert result.equals(expected)
result = pa.array(arr, type=pa.int64())
expected = pa.array([1, 2, 3], type=pa.int64())
assert result.equals(expected)
result = pa.array(arr, type=pa.float64())
expected = pa.array([1, 2, 3], type=pa.float64())
assert result.equals(expected)
# raise error when passing size or mask keywords
with pytest.raises(ValueError):
pa.array(arr, mask=np.array([True, False, True]))
with pytest.raises(ValueError):
pa.array(arr, size=3)
# ensure the return value is an Array
class MyArrayInvalid:
def __init__(self, data):
self.data = data
def __arrow_array__(self, type=None):
return np.array(self.data)
arr = MyArrayInvalid(np.array([1, 2, 3], dtype='int64'))
with pytest.raises(TypeError):
pa.array(arr)
# ARROW-7066 - allow ChunkedArray output
class MyArray2:
def __init__(self, data):
self.data = data
def __arrow_array__(self, type=None):
return pa.chunked_array([self.data], type=type)
arr = MyArray2(np.array([1, 2, 3], dtype='int64'))
result = pa.array(arr)
expected = pa.chunked_array([[1, 2, 3]], type=pa.int64())
assert result.equals(expected)
def test_concat_array():
concatenated = pa.concat_arrays(
[pa.array([1, 2]), pa.array([3, 4])])
assert concatenated.equals(pa.array([1, 2, 3, 4]))
def test_concat_array_different_types():
with pytest.raises(pa.ArrowInvalid):
pa.concat_arrays([pa.array([1]), pa.array([2.])])
@pytest.mark.pandas
def test_to_pandas_timezone():
# https://issues.apache.org/jira/browse/ARROW-6652
arr = pa.array([1, 2, 3], type=pa.timestamp('s', tz='Europe/Brussels'))
s = arr.to_pandas()
assert s.dt.tz is not None
arr = pa.chunked_array([arr])
s = arr.to_pandas()
assert s.dt.tz is not None
| apache-2.0 |
Winand/pandas | pandas/core/indexes/frozen.py | 20 | 4619 | """
frozen (immutable) data structures to support MultiIndexing
These are used for:
- .names (FrozenList)
- .levels & .labels (FrozenNDArray)
"""
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.io.formats.printing import pprint_thing
class FrozenList(PandasObject, list):
"""
Container that doesn't allow setting items, *but*, unlike a plain
list, is hashable (its hash is that of the corresponding tuple), so
it can be used for lookups, as a dictionary key, etc.
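Examples
--------
A small illustrative sketch (not part of the original docstring; the
values are made up):

>>> fl = FrozenList(['a', 'b'])
>>> list(fl + ['c'])           # concatenation returns a new FrozenList
['a', 'b', 'c']
>>> fl.append('c')             # mutation is disabled
Traceback (most recent call last):
    ...
TypeError: 'FrozenList' does not support mutable operations.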
"""
# Sidenote: This has to be of type list, otherwise it messes up PyTables
# typechecks
def __add__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(super(FrozenList, self).__add__(other))
__iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super(FrozenList, self).__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super(FrozenList, self).__getitem__(n))
return super(FrozenList, self).__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super(FrozenList, self).__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super(FrozenList, self).__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__.__name__)
def __unicode__(self):
return pprint_thing(self, quote_strings=True,
escape_chars=('\t', '\r', '\n'))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def searchsorted(self, v, side='left', sorter=None):
"""
Find indices where elements of ``v`` should be inserted
into ``self`` to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""
# we are much more performant if the searched
# indexer is the same type as the array
# this doesn't matter for int64, but DOES
# matter for smaller int dtypes
# https://github.com/numpy/numpy/issues/5370
try:
v = self.dtype.type(v)
except:
pass
return super(FrozenNDArray, self).searchsorted(
v, side=side, sorter=sorter)
def _ensure_frozen(array_like, categories, copy=False):
array_like = coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
| bsd-3-clause |
iismd17/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
UNR-AERIAL/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test ARDRegression on a toy dataset
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
ibab/tensorflow | tensorflow/examples/skflow/text_classification_save_restore.py | 9 | 3724 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
### Training data
# Downloads, unpacks and reads DBpedia dataset.
dbpedia = learn.datasets.load_dataset('dbpedia')
X_train, y_train = pandas.DataFrame(dbpedia.train.data)[1], pandas.Series(dbpedia.train.target)
X_test, y_test = pandas.DataFrame(dbpedia.test.data)[1], pandas.Series(dbpedia.test.target)
### Process vocabulary
MAX_DOCUMENT_LENGTH = 10
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 50
def average_model(X, y):
word_vectors = learn.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
features = tf.reduce_max(word_vectors, reduction_indices=1)
return learn.models.logistic_regression(features, y)
def rnn_model(X, y):
"""Recurrent neural network model to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = learn.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = learn.ops.split_squeeze(1, MAX_DOCUMENT_LENGTH, word_vectors)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
return learn.models.logistic_regression(encoding, y)
model_path = '/tmp/skflow_examples/text_classification'
if os.path.exists(model_path):
classifier = learn.TensorFlowEstimator.restore(model_path)
else:
classifier = learn.TensorFlowEstimator(model_fn=rnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Keep training in 100-step increments until interrupted; Ctrl+C saves the model
while True:
try:
classifier.fit(X_train, y_train)
except KeyboardInterrupt:
classifier.save(model_path)
break
# Predict on test set
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/cluster/hierarchy.py | 3 | 91486 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
give the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics. The customized distance can also be
used. See the ``distance.pdist`` function for details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
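Examples
--------
A minimal illustrative sketch (the observation matrix below is made up
for this example and is not part of the original documentation):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage
>>> X = np.array([[0., 0.], [0., 1.], [1., 0.],
...               [10., 10.], [10., 11.], [11., 10.]])
>>> Z = linkage(X, method='ward', metric='euclidean')
>>> Z.shape          # one row per merge: (n - 1) rows, 4 columns
(5, 4)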
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
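Examples
--------
A brief sketch (the condensed distances are hypothetical):

>>> from scipy.cluster.hierarchy import single, to_tree
>>> root = to_tree(single([1.0, 3.0, 2.0]))   # three observations
>>> sorted(root.pre_order())                  # ids of the leaf nodes
[0, 1, 2]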
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
tree : ClusterNode
    The root node of the converted tree. If ``rd`` is True, a tuple
    ``(r, d)`` is returned instead (see the ``rd`` parameter above).
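Examples
--------
A short sketch (hypothetical condensed distances, added for
illustration only):

>>> from scipy.cluster.hierarchy import single, to_tree
>>> Z = single([1.0, 3.0, 2.0])   # condensed distances for 3 points
>>> root = to_tree(Z)
>>> root.get_count()              # all three original observations
3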
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : float
    The cophenetic correlation coefficient (only returned if ``Y`` is
    passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
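Examples
--------
An illustrative sketch (hypothetical condensed distances):

>>> from scipy.cluster.hierarchy import single, cophenet
>>> y = [1.0, 4.0, 3.0]           # condensed distances for 3 points
>>> Z = single(y)
>>> d = cophenet(Z)
>>> len(d)                        # same condensed length as ``y``
3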
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}}{\\mathtt{R[i,1]}}
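Examples
--------
A small sketch (hypothetical linkage built from made-up distances):

>>> from scipy.cluster.hierarchy import single, inconsistent
>>> Z = single([1.0, 4.0, 3.0, 5.0, 2.0, 6.0])   # 4 observations
>>> R = inconsistent(Z)
>>> R.shape         # one row per non-singleton cluster, 4 columns
(3, 4)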
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
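Examples
--------
A round-trip sketch (hypothetical linkage; shown only to illustrate
the conversion):

>>> from scipy.cluster.hierarchy import single
>>> from scipy.cluster.hierarchy import to_mlab_linkage, from_mlab_linkage
>>> Z = single([1.0, 4.0, 3.0])
>>> Z2 = from_mlab_linkage(to_mlab_linkage(Z))
>>> Z2.shape        # the fourth (count) column is recomputed
(2, 4)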
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
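Examples
--------
A short sketch (hypothetical distances):

>>> from scipy.cluster.hierarchy import single, is_monotonic
>>> Z = single([1.0, 4.0, 3.0])
>>> bool(is_monotonic(Z))     # single linkage is always monotonic
True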
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The merge heights Z[:, 2] must be non-decreasing, i.e. each value
# must be no smaller than the one before it.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be an :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
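Examples
--------
A quick sketch (hypothetical linkage):

>>> from scipy.cluster.hierarchy import single, is_valid_linkage
>>> Z = single([1.0, 4.0, 3.0])
>>> is_valid_linkage(Z)
True
>>> is_valid_linkage(Z[:, :3])     # missing the fourth (count) column
False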
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::

    MR = maxRstat(Z, R, 3)
    fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::

    MI = maxinconsts(Z, R)
    fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
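Examples
--------
A minimal sketch (the condensed distances and threshold are made up
for illustration):

>>> from scipy.cluster.hierarchy import single, fcluster
>>> y = [1.0, 10.0, 10.0, 10.0, 10.0, 1.0]   # 4 observations, 2 tight pairs
>>> Z = single(y)
>>> T = fcluster(Z, t=2.0, criterion='distance')
>>> len(set(T.tolist()))                     # two flat clusters
2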
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
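# Illustrative usage sketch for `fcluster` (not part of the original module;
# the sample points below are made up). `linkage` is defined in this module
# and `pdist` comes from scipy.spatial.distance:
#
#     from scipy.spatial.distance import pdist
#     X = [[0, 0], [0, 1], [1, 0], [10, 10], [10, 11]]
#     Z = linkage(pdist(X), method='single')
#     fcluster(Z, t=2, criterion='maxclust')     # at most 2 flat clusters
#     fcluster(Z, t=1.5, criterion='distance')   # cut at cophenetic distance 1.5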
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
        The distance metric for calculating pairwise distances. See
        ``distance.pdist`` for a list of the supported metrics, and
        `linkage` to verify compatibility with the chosen linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
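# Illustrative sketch (not part of the original module): `fclusterdata` wraps
# pdist + linkage + fcluster in a single call. The sample data below is
# hypothetical; two well-separated blobs should yield two flat clusters.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = np.vstack([rng.normal(0, 0.1, (20, 2)),
#                    rng.normal(5, 0.1, (20, 2))])
#     T = fclusterdata(X, t=2, criterion='maxclust', method='average')
#     len(np.unique(T))   # expected: 2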
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
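# Illustrative sketch (not part of the original module): the order returned by
# `leaves_list` typically matches the left-to-right leaf order produced by
# `dendrogram` with its default (unsorted) settings.
#
#     from scipy.spatial.distance import pdist
#     Z = linkage(pdist([[0], [1], [4], [5]]), method='single')
#     order = leaves_list(Z)                 # a permutation of [0, 1, 2, 3]
#     info = dendrogram(Z, no_plot=True)
#     info['leaves'] == list(order)          # expected: True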
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p <= np.inf, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.xaxis.set_ticks_position('bottom')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = float(_get_tick_rotation(len(ivl))) if (leaf_rotation is None) else leaf_rotation
leaf_font = float(_get_tick_text_size(len(ivl))) if (leaf_font_size is None) else leaf_font_size
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = float(_get_tick_rotation(len(ivl))) if (leaf_rotation is None) else leaf_rotation
leaf_font = float(_get_tick_text_size(len(ivl))) if (leaf_font_size is None) else leaf_font_size
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = float(_get_tick_text_size(len(ivl))) if (leaf_font_size is None) else leaf_font_size
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = float(_get_tick_text_size(len(ivl))) if (leaf_font_size is None) else leaf_font_size
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend
    # item for each tree grouping, rather than one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
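# Illustrative sketch (not part of the original module): override the colour
# cycle used for links below the dendrogram colour threshold, then restore the
# module default ['g', 'r', 'c', 'm', 'y', 'k'].
#
#     set_link_color_palette(['m', 'c', 'y', 'k'])
#     # ... dendrogram(Z, color_threshold=...) calls here ...
#     set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])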
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
          Plots the root at the top, and plots descendent links going downwards.
(default).
``'bottom'``
          Plots the root at the bottom, and plots descendent links going
upwards.
``'left'``
          Plots the root at the left, and plots descendent links going right.
``'right'``
          Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
        parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
        parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
         observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When ``leaf_label_func`` is a callable function, it is called for
        each leaf with cluster index :math:`k < 2n-1` and is expected to
        return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
>>> # First define the leaf label function.
>>> def llf(id):
... if id < n:
... return str(id)
... else:
            ...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
>>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
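# Illustrative sketch (not part of the original module; sample data is
# hypothetical): compute the dendrogram geometry without plotting, and
# condense a large tree with truncation.
#
#     from scipy.spatial.distance import pdist
#     X = np.random.RandomState(1).rand(50, 3)
#     Z = linkage(pdist(X), method='average')
#     R = dendrogram(Z, no_plot=True)        # only the data structures
#     sorted(R.keys())   # ['color_list', 'dcoord', 'icoord', 'ivl', 'leaves']
#     dendrogram(Z, truncate_mode='lastp', p=10, show_contracted=True)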
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
    dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
      U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
    if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
    if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
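# Illustrative sketch (not part of the original module): two flat cluster
# assignments are reported as equivalent when one is a consistent relabelling
# of the other.
#
#     is_isomorphic([1, 1, 2, 2, 3], [5, 5, 4, 4, 9])   # True
#     is_isomorphic([1, 1, 2, 2], [1, 2, 2, 2])         # False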
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
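# Illustrative sketch (not part of the original module) tying `maxinconsts`
# and `maxRstat` to the 'monocrit' criteria of `fcluster`, mirroring the
# examples in the `fcluster` docstring above; sample data is made up.
#
#     from scipy.spatial.distance import pdist
#     Z = linkage(pdist([[0], [1], [4], [5], [20]]), method='single')
#     R = inconsistent(Z)
#     MI = maxinconsts(Z, R)
#     fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
#     MR = maxRstat(Z, R, 3)    # column 3 is the inconsistency coefficient
#     fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)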
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
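# Illustrative sketch (not part of the original module): recover the linkage
# node that leads each flat cluster produced by `fcluster`; sample data is
# made up.
#
#     from scipy.spatial.distance import pdist
#     Z = linkage(pdist([[0], [1], [10], [11]]), method='single')
#     T = fcluster(Z, t=2, criterion='maxclust')
#     L, M = leaders(Z, T)   # L[j] is the node id leading flat cluster M[j]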
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| gpl-2.0 |
mdhaber/scipy | scipy/stats/__init__.py | 7 | 12444 | """
.. _statsrefmanual:
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. currentmodule:: scipy.stats
This module contains a large number of probability distributions,
summary and frequency statistics, correlation functions and statistical
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
functionality, and more.
Statistics is a very large area, and there are topics that are out of scope
for SciPy and are covered by other packages. Some of the most important ones
are:
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
regression, linear models, time series analysis, extensions to topics
also covered by ``scipy.stats``.
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
functionality, interfaces to other statistical languages.
- `PyMC3 <https://docs.pymc.io/>`__: Bayesian statistical
modeling, probabilistic machine learning.
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
model selection.
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
Probability distributions
=========================
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
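As a brief illustrative sketch (not an exhaustive reference), each
distribution object exposes methods such as ``pdf``/``pmf``, ``cdf`` and
``rvs``, and can be "frozen" with fixed shape, location and scale parameters:

>>> from scipy import stats
>>> stats.norm.cdf(0.0)
0.5
>>> frozen = stats.gamma(a=2.0, scale=3.0)  # a "frozen" gamma distribution
>>> frozen.mean()
6.0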
Continuous distributions
------------------------
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
crystalball -- Crystalball
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
   f                 -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
genhyperbolic -- Generalized Hyperbolic
geninvgauss -- Generalized Inverse Gaussian
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
laplace -- Laplace
laplace_asymmetric -- Asymmetric Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
loguniform -- Log-Uniform
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
moyal -- Moyal
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
norminvgauss -- Normal Inverse Gaussian
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewcauchy -- Skew Cauchy
skewnorm -- Skew normal
studentized_range -- Studentized Range
t -- Student's T
trapezoid -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
--------------------------
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
multivariate_t -- Multivariate t-distribution
multivariate_hypergeom -- Multivariate hypergeometric distribution
Discrete distributions
----------------------
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
betabinom -- Beta-Binomial
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
nhypergeom -- Negative Hypergeometric
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
yulesimon -- Yule-Simon
zipf -- Zipf (Zeta)
zipfian -- Zipfian
An overview of statistical functions is given below. Many of these functions
have a similar version in `scipy.stats.mstats` which work for masked arrays.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
mode -- Modal value
moment -- Central moment
skew -- Skewness
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
gstd -- Geometric Standard Deviation
iqr
sem
bayes_mvs
mvsdist
entropy
differential_entropy
median_absolute_deviation
median_abs_deviation
bootstrap
Frequency statistics
====================
.. autosummary::
:toctree: generated/
cumfreq
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
Correlation functions
=====================
.. autosummary::
:toctree: generated/
f_oneway
alexandergovern
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
somersd
linregress
siegelslopes
theilslopes
multiscale_graphcorr
Statistical tests
=================
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
cramervonmises
cramervonmises_2samp
power_divergence
kstest
ks_1samp
ks_2samp
epps_singleton_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
jarque_bera
page_trend_test
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
binomtest
fligner
median_test
mood
skewtest
kurtosistest
normaltest
Quasi-Monte Carlo
=================
.. toctree::
:maxdepth: 4
stats.qmc
Masked statistics functions
===========================
.. toctree::
stats.mstats
Other statistical functionality
===============================
Transformations
---------------
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
yeojohnson
yeojohnson_normmax
yeojohnson_llf
obrientransform
sigmaclip
trimboth
trim1
zmap
zscore
Statistical distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
Random variate generation / CDF Inversion
=========================================
.. autosummary::
:toctree: generated/
rvs_ratio_uniforms
NumericalInverseHermite
Circular statistical functions
------------------------------
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
---------------------------
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.crosstab
contingency.expected_freq
contingency.margins
contingency.relative_risk
contingency.association
fisher_exact
barnard_exact
boschloo_exact
Plot-tests
----------
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
yeojohnson_normplot
Univariate and multivariate kernel density estimation
-----------------------------------------------------
.. autosummary::
:toctree: generated/
gaussian_kde
Warnings used in :mod:`scipy.stats`
-----------------------------------
.. autosummary::
:toctree: generated/
F_onewayConstantInputWarning
F_onewayBadInputSizesWarning
PearsonRConstantInputWarning
PearsonRNearConstantInputWarning
SpearmanRConstantInputWarning
"""
from .stats import *
from .distributions import *
from .morestats import *
from ._binomtest import binomtest
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from . import qmc
from ._multivariate import *
from . import contingency
from .contingency import chi2_contingency
from ._bootstrap import bootstrap
from ._entropy import *
from ._hypotests import *
from ._rvs_sampling import rvs_ratio_uniforms, NumericalInverseHermite
from ._page_trend_test import page_trend_test
from ._mannwhitneyu import mannwhitneyu
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| bsd-3-clause |
n7jti/kaggle | DigitRecognizer/digitr2.py | 1 | 1717 | #!/usr/bin/python
from scipy import *
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn import metrics
from sklearn import svm
import time
import pickle
def load ():
# Load a csv of floats:
#train = np.genfromtxt("data/train.csv", delimiter=",", skip_header=1)
#y_train = train[:,0].astype(int)
#x_train = train[:,1:]
npzfile = np.load('data/bindata.npz')
x = npzfile['x']
y = npzfile['y'].astype(int)
#test = np.genfromtxt("data/test.csv", delimiter=",", skip_header=1)
#x_test = test
return y, x
def main ():
print 'starting', time.asctime(time.localtime())
start = time.clock()
y, x, = load();
# split into a training and testing set
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5)
x_train = x[0:20000,:]
y_train = y[0:20000]
x_test = x[20000:40000,:]
y_test = y[20000:40000]
    # SVM hyperparameters (fixed here; GridSearchCV could be used to tune them)
C=10
gamma=1e-7
clf = svm.SVC(C=C, gamma=gamma)
    # Fit the classifier on the training split (first 20,000 samples)
clf.fit(x_train, y_train)
# Pickle the model!
outf = open('model.pkl', 'wb')
pickle.dump(clf, outf)
outf.close()
# Now predict the value of the digit on the second half:
y_true, y_pred = y_test, clf.predict(x_test)
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(y_true, y_pred)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_true, y_pred))
stop = time.clock()
print 'elapsed:', stop - start
if __name__ == "__main__":
main()
| apache-2.0 |
trankmichael/scikit-learn | sklearn/utils/__init__.py | 132 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
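# Illustrative sketch (not part of the original module): a boolean mask is
# converted to integer indices so that it can also be used to index sparse
# matrices.
#
#     import numpy as np
#     from scipy.sparse import csr_matrix
#     X = csr_matrix(np.arange(6).reshape(3, 2))
#     mask = np.array([True, False, True])
#     safe_mask(X, mask)                     # array([0, 2])
#     X[safe_mask(X, mask)].toarray().shape  # (2, 2)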
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
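# Illustrative sketch (not part of the original module): `safe_indexing`
# dispatches on the container type -- pandas objects via ``.iloc``, arrays via
# ``take``/fancy indexing, and plain lists via a comprehension.
#
#     import numpy as np
#     safe_indexing(np.array([[1, 2], [3, 4], [5, 6]]), np.array([2, 0]))
#     # -> array([[5, 6], [1, 2]])
#     safe_indexing(['a', 'b', 'c'], [2, 0])
#     # -> ['c', 'a']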
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
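# Illustrative sketch (not part of the original module): element-wise squaring
# that preserves sparsity.
#
#     import numpy as np
#     from scipy.sparse import csr_matrix
#     safe_sqr(np.array([[1, -2], [3, 0]]))              # [[1, 4], [9, 0]]
#     safe_sqr(csr_matrix([[1, -2], [3, 0]])).toarray()  # [[1, 4], [9, 0]]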
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
    The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
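# Added illustrative note: tosequence(iter(range(3))) materialises the iterator
# into the list [0, 1, 2], whereas an existing list, tuple, or ndarray is
# returned without copying.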
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
IQuOD/AutoQC | analyse-results.py | 1 | 24741 | import csv, getopt, json, pandas, sys, ast
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from util import dbutils, main
def read_qc_groups(filename='qctest_groups.csv'):
# Read a csv file containing information on QC test groups.
    # Returns a dictionary containing the information from the csv file.
csvfile = open(filename)
groupinfo = csv.reader(csvfile)
# Create empty lists for each type of group.
groupdefinition = {'Remove above reject':[],
'Remove below reject':[],
'Remove rejected levels':[],
'Remove profile':[],
'Optional':[],
'At least one from group':{}}
# Fill out the lists.
for i, spec in enumerate(groupinfo):
if i == 0: continue # Miss the header line.
if spec[1] in groupdefinition:
# For the 'at least one from group' rule we maintain lists of
# the tests in each group.
if spec[1] == 'At least one from group':
if spec[0] in groupdefinition[spec[1]]:
groupdefinition[spec[1]][spec[0]].append(spec[2])
else:
groupdefinition[spec[1]][spec[0]] = [spec[2]]
else:
# Other rules have a list of the tests that fall into them.
groupdefinition[spec[1]].append(spec[2])
else:
raise NameError('Rule not recognised: ', spec)
csvfile.close()
return groupdefinition
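# Added commentary: an illustrative qctest_groups.csv layout as assumed by the
# parser above (the header text and test names are hypothetical; only the
# column order matters - column 0 = group name, column 1 = rule, column 2 = QC
# test name):
#   Group,Rule,Test
#   Location,At least one from group,EN_location_check
#   ,Remove profile,wire_break_test
# Each rule must be one of the keys initialised in groupdefinition above,
# otherwise a NameError is raised.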
def return_cost(costratio, tpr, fpr):
# Return cost function.
# Inputs are:
# costratio - 2 element iterable used to define the cost function; roughly
# they define the minimum required gradient of the ROC curve
# with the first giving the gradient at the start, the second
# the gradient at the end. Suggested values: to get a compromise
# set of QC tests, define costratio = [2.5, 1.0]; to get a
# conservative set of QC tests, define costratio = [10.0, 10.0].
# tpr - True positive rate to get the cost for.
# fpr - False positive rate to get the cost for.
# Returns: the cost function value.
costratio1 = costratio[0]
costratio2 = costratio[1]
theta1 = np.arctan(costratio1)
theta2 = np.arctan(costratio2)
cost1 = (100.0 - tpr) * np.cos(theta1) + fpr * np.sin(theta1)
cost2 = (100.0 - tpr) * np.cos(theta2) + fpr * np.sin(theta2)
cost = (100.0 - tpr) / 100.0 * cost1 + tpr / 100.0 * cost2
return cost
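# Added commentary (not in the original source): a perfect set of checks gives
# return_cost(costratio, 100.0, 0.0) == 0.0 for any costratio, and the cost
# increases with the false positive rate. Larger costratio values increase
# sin(arctan(costratio)) and shrink the cosine term, so false positives are
# penalised more heavily - which is why [10.0, 10.0] is suggested in the
# docstring above for a conservative set of QC tests.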
def find_roc(table,
targetdb,
costratio=[2.5, 1.0],
filter_on_wire_break_test=False,
filter_from_file_spec=True,
enforce_types_of_check=True,
n_profiles_to_analyse=np.iinfo(np.int32).max,
n_combination_iterations=0,
with_reverses=False,
effectiveness_ratio=2.0,
improve_threshold=1.0,
verbose=True,
plot_roc=True,
write_roc=True,
mark_training=False):
'''
Generates a ROC curve from the database data in table by maximising the gradient
of the ROC curve. It will combine different tests together and invert the results
of tests if requested.
costratio - two element iterable that defines how the ROC curve is developed. Higher
numbers gives a ROC curve with lower false rates; the two elements allows
control over the shape of the ROC curve near the start and end. E.g. [2.5, 1.0].
filter_on_wire_break_test - filter out the impact of XBT wire breaks from results.
filter_from_file_spec - use specification from file to choose filtering.
enforce_types_of_check - use specification from file on particular types of checks to use.
n_profiles_to_analyse - restrict the number of profiles extracted from the database.
n_combination_iterations - AND tests together; restricted to max of 2 as otherwise
number of tests gets very large.
with_reverses - if True, a copy of each test with inverted results is made.
effectiveness_ratio - will give a warning if TPR / FPR is less than this value.
    improve_threshold - ignores tests if they do not result in a change in true positive
rate (in %) of at least this amount.
verbose - if True, will print a lot of messages to screen.
plot_roc - if True, will save an image of the ROC to roc.png.
write_roc - if True, will save the ROC data to roc.json.
'''
# Read QC test specifications if required.
groupdefinition = {}
if filter_from_file_spec or enforce_types_of_check:
groupdefinition = read_qc_groups()
# Read data from database into a pandas data frame.
df = dbutils.db_to_df(table = table,
targetdb = targetdb,
filter_on_wire_break_test = filter_on_wire_break_test,
filter_on_tests = groupdefinition,
n_to_extract = n_profiles_to_analyse,
pad=2,
XBTbelow=True)
# Drop nondiscriminating tests
nondiscrim = []
cols = list(df.columns)
for c in cols:
if len(pandas.unique(df[c])) == 1:
nondiscrim.append(c)
if verbose: print(c + ' is nondiscriminating and will be removed')
cols = [t for t in cols if t not in nondiscrim]
df = df[cols]
print(list(df))
testNames = df.columns[2:].values.tolist()
if verbose:
print('Number of profiles is: ', len(df.index))
print('Number of quality checks to process is: ', len(testNames))
# mark chosen profiles as part of the training set
all_uids = main.dbinteract('SELECT uid from ' + table + ';', targetdb=targetdb)
if mark_training:
for uid in all_uids:
uid = uid[0]
is_training = int(uid in df['uid'].astype(int).to_numpy())
query = "UPDATE " + table + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
main.dbinteract(query, targetdb=targetdb)
# Convert to numpy structures and make inverse versions of tests if required.
# Any test with true positive rate of zero is discarded.
truth = df['Truth'].to_numpy()
tests = []
names = []
tprs = []
fprs = []
if with_reverses:
reverselist = [False, True]
else:
reverselist = [False]
for i, testname in enumerate(testNames):
for reversal in reverselist:
results = df[testname].to_numpy() != reversal
tpr, fpr, fnr, tnr = main.calcRates(results, truth)
if tpr > 0.0:
tests.append(results)
if reversal:
addtext = 'r'
else:
addtext = ''
names.append(addtext + testname)
tprs.append(tpr)
fprs.append(fpr)
del df # No further need for the data frame.
if verbose: print('Number of quality checks after adding reverses and removing zero TPR was: ', len(names))
# Create storage to hold the roc curve.
cumulative = truth.copy()
cumulative[:] = False
currenttpr = 0.0
currentfpr = 0.0
r_fprs = [] # The false positive rate for each ROC point.
r_tprs = [] # True positive rate for each ROC point.
testcomb = [] # The QC test that was added at each ROC point.
groupsel = [] # Set to True if the ROC point was from an enforced group.
# Pre-select some tests if required.
if enforce_types_of_check:
if verbose: print('Enforcing types of checks')
while len(groupdefinition['At least one from group']) > 0:
bestchoice = ''
bestgroup = ''
bestdist = np.sqrt(100.0**2 + 100.0**2)
besti = -1
for key in groupdefinition['At least one from group']:
for testname in groupdefinition['At least one from group'][key]:
# Need to check that the test exists - it may have been removed
# if it was non-discriminating.
if testname in names:
for itest, name in enumerate(names):
if name == testname:
cumulativenew = np.logical_or(cumulative, tests[itest])
tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
newdist = return_cost(costratio, tpr, fpr)
print(' ', tpr, fpr, newdist, bestdist, testname)
if newdist == bestdist:
if verbose:
print(' ' + bestchoice + ' and ' + testname + ' have the same results and the first is kept')
elif newdist < bestdist:
bestchoice = testname
bestdist = newdist
besti = itest
bestgroup = key
else:
if verbose: print(' ' + testname + ' not found and so was skipped')
if bestchoice == '':
print('WARNING no suitable tests in group "' + key + '", skipping')
del groupdefinition['At least one from group'][key]
else:
if verbose: print(' ' + bestchoice + ' was selected from group ' + bestgroup)
if fprs[besti] > 0:
if tprs[besti] / fprs[besti] < effectiveness_ratio:
print('WARNING - ' + bestchoice + ' TPR / FPR is below the effectiveness ratio limit: ', tprs[besti] / fprs[besti], effectiveness_ratio)
cumulative = np.logical_or(cumulative, tests[besti])
currenttpr, currentfpr, fnr, tnr = main.calcRates(cumulative, truth)
testcomb.append(names[besti])
r_fprs.append(currentfpr)
r_tprs.append(currenttpr)
groupsel.append(True)
# Once a test has been added, it can be deleted so that it is not considered again.
del names[besti]
del tests[besti]
del fprs[besti]
del tprs[besti]
del groupdefinition['At least one from group'][bestgroup]
print('ROC point from enforced group: ', currenttpr, currentfpr, testcomb[-1], bestgroup)
# Make combinations of the single checks and store.
assert n_combination_iterations <= 2, 'Setting n_combination_iterations > 2 results in a very large number of combinations'
if verbose: print('Starting construction of combinations with number of iterations: ', n_combination_iterations)
for its in range(n_combination_iterations):
ntests = len(names)
for i in range(ntests - 1):
if verbose: print('Processing iteration ', its + 1, ' out of ', n_combination_iterations, ' step ', i + 1, ' out of ', ntests - 1, ' with number of tests now ', len(names))
for j in range(i + 1, ntests):
# Create the name for this combination.
newname = ('&').join(sorted((names[i] + '&' + names[j]).split('&')))
if newname in names: continue # Do not keep multiple copies of the same combination.
results = np.logical_and(tests[i], tests[j])
tpr, fpr, fnr, tnr = main.calcRates(results, truth)
if tpr > 0.0:
tests.append(results)
tprs.append(tpr)
fprs.append(fpr)
names.append(newname)
if verbose: print('Completed generation of tests, now constructing roc from number of tests: ', len(names))
# Create roc.
used = np.zeros(len(names), dtype=bool)
overallbest = return_cost(costratio, tpr, fpr)
keepgoing = True
while keepgoing:
keepgoing = False
besti = -1
bestcost = overallbest
bestncomb = 100000
bestdtpr = 0
bestdfpr = 100000
for i in range(len(names)):
if used[i]: continue
cumulativenew = np.logical_or(cumulative, tests[i])
tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
dtpr = tpr - currenttpr
dfpr = fpr - currentfpr
newcost = return_cost(costratio, tpr, fpr)
newbest = False
if newcost <= bestcost and dtpr >= improve_threshold and dtpr > 0.0:
# If cost is better than found previously, use it else if it is
# the same then decide if to use it or not.
if newcost < bestcost:
newbest = True
elif dtpr >= bestdtpr:
if dtpr > bestdtpr:
newbest = True
elif len(names[i].split('&')) < bestncomb:
newbest = True
if newbest:
besti = i
bestcost = newcost
bestncomb = len(names[i].split('&'))
bestdtpr = dtpr
bestdfpr = dfpr
if besti >= 0:
keepgoing = True
used[besti] = True
overallbest = bestcost
cumulative = np.logical_or(cumulative, tests[besti])
currenttpr, currentfpr, fnr, tnr = main.calcRates(cumulative, truth)
testcomb.append(names[besti])
r_fprs.append(currentfpr)
r_tprs.append(currenttpr)
groupsel.append(False)
print('ROC point: ', currenttpr, currentfpr, names[besti], overallbest)
if plot_roc:
plt.plot(r_fprs, r_tprs, 'k')
for i in range(len(r_fprs)):
if groupsel[i]:
colour = 'r'
else:
colour = 'b'
plt.plot(r_fprs[i], r_tprs[i], colour + 'o')
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.xlabel('False positive rate (%)')
plt.ylabel('True positive rate (%)')
plt.savefig(plot_roc)
plt.close()
if write_roc:
f = open(write_roc, 'w')
r = {}
r['tpr'] = r_tprs
r['fpr'] = r_fprs
r['tests'] = testcomb
r['groupsel'] = groupsel
json.dump(r, f)
f.close()
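# Hedged usage sketch (illustrative only; the sample size and output file names
# are arbitrary, mirroring the defaults used in the __main__ block below):
#   find_roc(table='iquod', targetdb='iquod.db', costratio=[2.5, 1.0],
#            n_profiles_to_analyse=10000, plot_roc='roc.png',
#            write_roc='roc.json')
# writes the ROC plot to roc.png and the ROC data to roc.json.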
def find_roc_ordered(table,
targetdb,
costratio=[2.5, 1.0],
n_profiles_to_analyse=np.iinfo(np.int32).max,
improve_threshold=1.0,
verbose=True,
plot_roc=True,
write_roc=True,
levelbased=False,
mark_training=False):
'''
Finds optimal tests to include in a QC set.
table - the database table to read;
    targetdb - the database file;
n_profiles_to_analyse - how many profiles to read from the database;
improve_threshold - how much improvement in true positive rate is needed
for a test to be accepted once all groups have been done;
verbose - controls whether messages on progress are output;
    plot_roc - controls whether a plot is generated;
    write_roc - controls whether a JSON file of the ROC data is written;
levelbased - controls whether the database is re-read on each iteration of the
processing; if False then profiles are not considered again
once an accepted test has flagged them; if True, the levels that
are flagged are removed so other problems in the profile can be
used to determine the best quality control checks to use.
'''
# Check that the options make sense.
if levelbased:
assert mark_training == False, 'Cannot use mark_training with levelbased'
# Define the order of tests.
ordering = ['Location', 'Range', 'Climatology', 'Increasing depth', 'Constant values',
'Spike or step', 'Gradient', 'Density']
# Read QC test specifications.
groupdefinition = read_qc_groups()
# Create results list.
qclist = []
keepgoing = True
iit = 0
while keepgoing:
if iit < len(ordering):
grouptofind = ordering[iit]
else:
grouptofind = 'any'
if verbose: print('-- Iteration ', iit + 1, ' to find test of type ', grouptofind)
if (iit == 0) or levelbased:
if verbose: print('---- Running database read')
# Read data from database into a pandas data frame.
df = dbutils.db_to_df(table = table,
targetdb = targetdb,
filter_on_wire_break_test = False,
filter_on_tests = groupdefinition,
n_to_extract = n_profiles_to_analyse,
pad=2,
XBTbelow=True)
# mark chosen profiles as part of the training set
if mark_training:
all_uids = main.dbinteract('SELECT uid from ' + table + ';', targetdb=targetdb)
for uid in all_uids:
uid = uid[0]
is_training = int(uid in df['uid'].astype(int).to_numpy())
query = "UPDATE " + table + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
main.dbinteract(query, targetdb=targetdb)
# Drop nondiscriminating tests i.e. those that flag all or none
# of the profiles.
nondiscrim = []
cols = list(df.columns)
for c in cols:
if len(pandas.unique(df[c])) == 1:
nondiscrim.append(c)
if verbose: print(c + ' is nondiscriminating and will be removed')
cols = [t for t in cols if t not in nondiscrim]
df = df[cols]
testNames = df.columns[2:].values.tolist()
# Convert to numpy structures and save copy if first iteration.
truth = df['Truth'].to_numpy()
tests = []
for i, tn in enumerate(testNames):
results = df[tn].to_numpy()
tests.append(results)
cumulative = truth.copy()
cumulative[:] = False
if iit == 0:
alltruth = df['Truth'].to_numpy()
alltests = []
for i, tn in enumerate(testNames):
results = df[tn].to_numpy()
alltests.append(results)
allnames = testNames.copy()
del df # No further need for the data frame.
# Try to select a QC check to add to the set.
if verbose: print('---- Selecting the QC check')
if grouptofind == 'any':
testnamestosearch = testNames
else:
testnamestosearch = groupdefinition['At least one from group'][grouptofind]
# Find the best choice from all the QC tests in this group.
bestchoice = ''
bestcost = return_cost(costratio, 0.0, 100.0)
for tn in testnamestosearch:
if tn in qclist: continue
for itest, name in enumerate(testNames):
if name == tn:
cumulativenew = np.logical_or(cumulative, tests[itest])
tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
newcost = return_cost(costratio, tpr, fpr)
if verbose: print(' ', tpr, fpr, newcost, bestcost, name)
if newcost == bestcost:
if verbose:
print(' ' + bestchoice + ' and ' + name + ' have the same results and the first is kept')
elif newcost < bestcost:
bestchoice = name
bestcost = newcost
besti = itest
besttpr = tpr
# If selecting from any test, need to ensure that it is worth keeping.
if grouptofind == 'any' and bestchoice != '':
ctpr, cfpr, cfnr, ctnr = main.calcRates(cumulative, truth)
currentcost = return_cost(costratio, ctpr, cfpr)
if (currentcost < bestcost or
(besttpr - ctpr) < improve_threshold):
bestchoice = ''
# Record the choice that is made.
if bestchoice == '':
if verbose: print('WARNING: no suitable tests in group "' + grouptofind + '", skipping')
if grouptofind == 'any':
if verbose: print('End of QC test selection')
keepgoing = False
else:
if verbose: print(' ' + bestchoice + ' was selected from group ' + grouptofind)
cumulative = np.logical_or(cumulative, tests[besti])
qclist.append(bestchoice)
groupdefinition['Remove rejected levels'].append(bestchoice)
# Increment iit to move on to the next group.
iit += 1
# Create roc.
cumulative = alltruth.copy()
cumulative[:] = False
r_fprs = []
r_tprs = []
for tn in qclist:
found = False
for itest, name in enumerate(allnames):
if tn == name:
cumulative = np.logical_or(cumulative, alltests[itest])
tpr, fpr, fnr, tnr = main.calcRates(cumulative, alltruth)
r_fprs.append(fpr)
r_tprs.append(tpr)
print('ROC point: ', tpr, fpr, tn)
found = True
assert found, 'Error in constructing ROC'
if plot_roc:
plt.plot(r_fprs, r_tprs, 'k')
for i in range(len(r_fprs)):
colour = 'b'
plt.plot(r_fprs[i], r_tprs[i], colour + 'o')
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.xlabel('False positive rate (%)')
plt.ylabel('True positive rate (%)')
plt.savefig(plot_roc)
plt.close()
if write_roc:
f = open(write_roc, 'w')
r = {}
r['tpr'] = r_tprs
r['fpr'] = r_fprs
r['tests'] = qclist
json.dump(r, f)
f.close()
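# Added usage note: this ordered variant is selected with the -s flag in the
# __main__ block below, e.g.
#   python analyse-results.py -t iquod.db -d iquod -n 10000 -s -o roc.json -p roc.png
# (the sample size and output file names here are illustrative).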
if __name__ == '__main__':
# parse options
options, remainder = getopt.getopt(sys.argv[1:], 't:d:n:c:o:p:hslm')
targetdb = 'iquod.db'
dbtable = 'iquod'
outfile = False
plotfile = False
samplesize = None
costratio = [5.0, 5.0]
ordered = False
levelbased = False
mark_training = False
for opt, arg in options:
if opt == '-d':
dbtable = arg
if opt == '-t':
targetdb = arg
if opt == '-n':
samplesize = int(arg)
if opt == '-c':
costratio = ast.literal_eval(arg)
if opt == '-o':
outfile = arg
if opt == '-p':
plotfile = arg
if opt == '-s':
ordered = True
if opt == '-l':
levelbased = True
if opt == '-m':
mark_training = True
if opt == '-h':
print('usage:')
print('-d <db table name to read from>')
print('-t <name of db file>')
print('-n <number of profiles to consider>')
print('-c <cost ratio array>')
print('-s Find QC tests in a predefined sequence')
print('-l If -s, remove only QCed out levels not profiles on each step')
print('-m If -m, profiles used to generate the ROC will be marked')
print('-o <filename to write json results out to>')
print('-p <filename to write roc plot out to>')
print('-h print this help message and quit')
if samplesize is None:
print('please provide a sample size to consider with the -n flag')
print('-h to print usage')
if ordered:
find_roc_ordered(table=dbtable, targetdb=targetdb, n_profiles_to_analyse=samplesize, costratio=costratio, plot_roc=plotfile, write_roc=outfile, levelbased=levelbased, mark_training=mark_training)
else:
find_roc(table=dbtable, targetdb=targetdb, n_profiles_to_analyse=samplesize, costratio=costratio, plot_roc=plotfile, write_roc=outfile, mark_training=mark_training)
| mit |
BhallaLab/moose-full | moose-examples/snippets/stimtable.py | 2 | 3419 | # stimtable.py ---
#
# Filename: stimtable.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed May 8 18:51:07 2013 (+0530)
# Version:
# Last-Updated: Mon May 27 21:15:36 2013 (+0530)
# By: subha
# Update #: 124
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""Example of StimulusTable using Poisson random numbers.
Creates a StimulusTable and assigns it a signal representing events in a
Poisson process. The output of the StimulusTable is recorded in a regular
Table via its 'getOutputValue' field.
"""
import numpy as np
from matplotlib import pyplot as plt
import moose
from moose import utils
def stimulus_table_demo():
model = moose.Neutral('/model')
data = moose.Neutral('/data')
# This is the stimulus generator
stimtable = moose.StimulusTable('/model/stim')
recorded = moose.Table('/data/stim')
moose.connect(recorded, 'requestOut', stimtable, 'getOutputValue')
simtime = 100
simdt = 1e-3
    # Inter-stimulus intervals drawn from an exponential distribution.
    # Note: np.random.exponential takes the scale (mean interval), so `rate`
    # here is effectively the mean inter-stimulus interval rather than a rate.
rate = 20
np.random.seed(1) # ensure repeatability
isi = np.random.exponential(rate, int(simtime/rate))
# The stimulus times are the cumulative sum of the inter-stimulus intervals.
stimtimes = np.cumsum(isi)
# Select only stimulus times that are within simulation time -
# this may leave out some possible stimuli at the end, but the
    # expected number of Poisson events within simtime is
# simtime/rate.
stimtimes = stimtimes[stimtimes < simtime]
ts = np.arange(0, simtime, simdt)
# Find the indices of table entries corresponding to time of stimulus
stimidx = np.searchsorted(ts, stimtimes)
stim = np.zeros(len(ts))
# Since linear interpolation is forced, we need at least three
# consecutive entries to have same value to get correct
# magnitude. And still we shall be off by at least one time step.
indices = np.concatenate((stimidx-1, stimidx, stimidx+1))
stim[indices] = 1.0
stimtable.vector = stim
stimtable.stepSize = 0 # This forces use of current time as x value for interpolation
stimtable.stopTime = simtime
moose.setClock(0, simdt)
moose.useClock(0, '/model/##,/data/##', 'process')
moose.reinit()
moose.start(simtime)
plt.plot(np.linspace(0, simtime, len(recorded.vector)), recorded.vector, 'r-+', label='generated stimulus')
plt.plot(ts, stim, 'b-x', label='originally assigned values')
plt.ylim((-1, 2))
plt.legend()
    plt.title('Example of StimulusTable')
plt.show()
if __name__ == '__main__':
stimulus_table_demo()
#
# stimtable.py ends here
| gpl-2.0 |
fartashf/cleverhans | cleverhans_tutorials/mnist_tutorial_cw.py | 2 | 9527 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
from tensorflow.python.platform import flags
import logging
import os
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.utils import pair_visual, grid_visual, AccuracyReport
from cleverhans.utils import set_log_level
from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval, tf_model_load
from cleverhans_tutorials.tutorial_models import make_basic_cnn
FLAGS = flags.FLAGS
def mnist_tutorial_cw(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=True, nb_epochs=6,
batch_size=128, nb_classes=10, source_samples=10,
learning_rate=0.001, attack_iterations=100,
model_path=os.path.join("models", "mnist"),
targeted=True):
"""
MNIST tutorial for Carlini and Wagner's attack
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param viz_enabled: (boolean) activate plots of adversarial examples
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param nb_classes: number of output classes
:param source_samples: number of test inputs to attack
:param learning_rate: learning rate for training
:param model_path: path to the model file
:param targeted: should we run a targeted attack? or untargeted?
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# MNIST-specific dimensions
img_rows = 28
img_cols = 28
channels = 1
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
sess = tf.Session()
print("Created TensorFlow session.")
set_log_level(logging.DEBUG)
# Get MNIST test data
X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
# Define TF model graph
model = make_basic_cnn()
preds = model(x)
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': os.path.join(*os.path.split(model_path)[:-1]),
'filename': os.path.split(model_path)[-1]
}
rng = np.random.RandomState([2017, 8, 30])
# check if we've trained before, and if we have, use that pre-trained model
if os.path.exists(model_path + ".meta"):
tf_model_load(sess, model_path)
else:
model_train(sess, x, y, preds, X_train, Y_train, args=train_params,
save=os.path.exists("models"), rng=rng)
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Craft adversarial examples using Carlini and Wagner's approach
###########################################################################
nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
' adversarial examples')
print("This could take some time ...")
# Instantiate a CW attack object
cw = CarliniWagnerL2(model, back='tf', sess=sess)
if viz_enabled:
assert source_samples == nb_classes
idxs = [np.where(np.argmax(Y_test, axis=1) == i)[0][0]
for i in range(nb_classes)]
if targeted:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, nb_classes, img_rows, img_cols, channels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = np.array(
[[instance] * nb_classes for instance in X_test[idxs]],
dtype=np.float32)
else:
adv_inputs = np.array(
[[instance] * nb_classes for
instance in X_test[:source_samples]], dtype=np.float32)
one_hot = np.zeros((nb_classes, nb_classes))
one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1
adv_inputs = adv_inputs.reshape(
(source_samples * nb_classes, img_rows, img_cols, 1))
adv_ys = np.array([one_hot] * source_samples,
dtype=np.float32).reshape((source_samples *
nb_classes, nb_classes))
yname = "y_target"
else:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, 2, img_rows, img_cols, channels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = X_test[idxs]
else:
adv_inputs = X_test[:source_samples]
adv_ys = None
yname = "y"
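    # Added shape note (commentary, not in the original tutorial): in targeted
    # mode with source_samples == nb_classes == 10, adv_inputs has shape
    # (100, 28, 28, 1) (each source image repeated once per target class) and
    # adv_ys has shape (100, 10) holding one-hot target labels; in untargeted
    # mode adv_inputs is just the selected test images and adv_ys is None.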
cw_params = {'binary_search_steps': 1,
yname: adv_ys,
'max_iterations': attack_iterations,
'learning_rate': 0.1,
'batch_size': source_samples * nb_classes if
targeted else source_samples,
'initial_const': 10}
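    # Hedged parameter note (added commentary): binary_search_steps controls
    # how many times the attack re-tunes the trade-off constant between
    # distortion and attack success, initial_const is that constant's starting
    # value, and max_iterations/learning_rate drive the inner gradient-based
    # optimisation of the adversarial perturbation.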
adv = cw.generate_np(adv_inputs,
**cw_params)
eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}
if targeted:
adv_accuracy = model_eval(
sess, x, y, preds, adv, adv_ys, args=eval_params)
else:
if viz_enabled:
adv_accuracy = 1 - \
model_eval(sess, x, y, preds, adv, Y_test[
idxs], args=eval_params)
else:
adv_accuracy = 1 - \
model_eval(sess, x, y, preds, adv, Y_test[
:source_samples], args=eval_params)
if viz_enabled:
for j in range(nb_classes):
if targeted:
for i in range(nb_classes):
grid_viz_data[i, j] = adv[i * nb_classes + j]
else:
grid_viz_data[j, 0] = adv_inputs[j]
grid_viz_data[j, 1] = adv[j]
print(grid_viz_data.shape)
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
report.clean_train_adv_eval = 1. - adv_accuracy
# Compute the average distortion introduced by the algorithm
percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,
axis=(1, 2, 3))**.5)
print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
import matplotlib.pyplot as plt
_ = grid_visual(grid_viz_data)
return report
def main(argv=None):
mnist_tutorial_cw(viz_enabled=FLAGS.viz_enabled,
nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
nb_classes=FLAGS.nb_classes,
source_samples=FLAGS.source_samples,
learning_rate=FLAGS.learning_rate,
attack_iterations=FLAGS.attack_iterations,
model_path=FLAGS.model_path,
targeted=FLAGS.targeted)
if __name__ == '__main__':
flags.DEFINE_boolean('viz_enabled', True, 'Visualize adversarial ex.')
flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
flags.DEFINE_integer('source_samples', 10, 'Nb of test inputs to attack')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_string('model_path', os.path.join("models", "mnist"),
'Path to save or load the model file')
    flags.DEFINE_integer('attack_iterations', 100,
'Number of iterations to run attack; 1000 is good')
flags.DEFINE_boolean('targeted', True,
'Run the tutorial in targeted mode?')
tf.app.run()
| mit |
TomAugspurger/pandas | pandas/tests/indexes/categorical/test_constructors.py | 3 | 5372 | import numpy as np
import pytest
from pandas import Categorical, CategoricalDtype, CategoricalIndex, Index
import pandas._testing as tm
class TestCategoricalIndexConstructors:
def test_construction(self):
ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False)
categories = ci.categories
result = Index(ci)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
result = Index(ci.values)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
# empty
result = CategoricalIndex(categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8"))
assert not result.ordered
# passing categories
result = CategoricalIndex(list("aabbca"), categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
c = Categorical(list("aabbca"))
result = CategoricalIndex(c)
tm.assert_index_equal(result.categories, Index(list("abc")))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
assert not result.ordered
result = CategoricalIndex(c, categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
assert not result.ordered
ci = CategoricalIndex(c, categories=list("abcd"))
result = CategoricalIndex(ci)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
assert not result.ordered
result = CategoricalIndex(ci, categories=list("ab"))
tm.assert_index_equal(result.categories, Index(list("ab")))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8")
)
assert not result.ordered
result = CategoricalIndex(ci, categories=list("ab"), ordered=True)
tm.assert_index_equal(result.categories, Index(list("ab")))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8")
)
assert result.ordered
result = CategoricalIndex(ci, categories=list("ab"), ordered=True)
expected = CategoricalIndex(
ci, categories=list("ab"), ordered=True, dtype="category"
)
tm.assert_index_equal(result, expected, exact=True)
# turn me to an Index
result = Index(np.array(ci))
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
def test_construction_with_dtype(self):
# specify dtype
ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False)
result = Index(np.array(ci), dtype="category")
tm.assert_index_equal(result, ci, exact=True)
result = Index(np.array(ci).tolist(), dtype="category")
tm.assert_index_equal(result, ci, exact=True)
# these are generally only equal when the categories are reordered
ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories)
tm.assert_index_equal(result, ci, exact=True)
# make sure indexes are handled
expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2], ordered=True)
idx = Index(range(3))
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
def test_construction_empty_with_bool_categories(self):
# see GH#22702
cat = CategoricalIndex([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_construction_with_categorical_dtype(self):
# construction with CategoricalDtype
# GH#18109
data, cats, ordered = "a a b b".split(), "c b a".split(), True
dtype = CategoricalDtype(categories=cats, ordered=ordered)
result = CategoricalIndex(data, dtype=dtype)
expected = CategoricalIndex(data, categories=cats, ordered=ordered)
tm.assert_index_equal(result, expected, exact=True)
# GH#19032
result = Index(data, dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
# error when combining categories/ordered and dtype kwargs
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalIndex(data, categories=cats, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Index(data, categories=cats, dtype=dtype)
with pytest.raises(ValueError, match=msg):
CategoricalIndex(data, ordered=ordered, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Index(data, ordered=ordered, dtype=dtype)
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
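# (Added note: DBSCAN assigns the label -1 to noise samples, which is why one
# label is discounted above when noise is present.)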
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
yt-project/unyt | unyt/__init__.py | 1 | 4014 | """
The unyt package.
Note that the symbols defined in :mod:`unyt.physical_constants` and
:mod:`unyt.unit_symbols` are importable from this module. For example::
>>> from unyt import km, clight
>>> print((km/clight).to('ns'))
3335.64095198152 ns
In addition, the following functions and classes are importable from the
top-level ``unyt`` namespace:
* :func:`unyt.array.loadtxt`
* :func:`unyt.array.savetxt`
* :func:`unyt.test`
* :func:`unyt.array.uconcatenate`
* :func:`unyt.array.ucross`
* :func:`unyt.array.udot`
* :func:`unyt.array.uhstack`
* :func:`unyt.array.uintersect1d`
* :func:`unyt.array.unorm`
* :func:`unyt.array.ustack`
* :func:`unyt.array.uunion1d`
* :func:`unyt.array.uvstack`
* :class:`unyt.array.unyt_array`
* :class:`unyt.array.unyt_quantity`
* :func:`unyt.unit_object.define_unit`
* :class:`unyt.unit_object.Unit`
* :class:`unyt.unit_registry.UnitRegistry`
* :class:`unyt.unit_systems.UnitSystem`
* :func:`unyt.testing.assert_allclose_units`
* :func:`unyt.array.allclose_units`
* :func:`unyt.dimensions.accepts`
* :func:`unyt.dimensions.returns`
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2018, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
try:
import numpy as np
try:
from pkg_resources import parse_version
npv = np.__version__
if parse_version(npv) < parse_version("1.13.0"): # pragma: no cover
raise RuntimeError(
"The unyt package requires NumPy 1.13 or newer but NumPy %s "
"is installed" % npv
)
del parse_version, npv
except ImportError: # pragma: no cover
# setuptools isn't installed so we don't try to check version numbers
pass
del np
except ImportError: # pragma: no cover
raise RuntimeError("The unyt package requires numpy but numpy is not installed.")
try:
import sympy
del sympy
except ImportError: # pragma: no cover
raise RuntimeError("The unyt package requires sympy but sympy is not installed.")
from ._version import get_versions
from unyt import unit_symbols
from unyt import physical_constants
from unyt.array import ( # NOQA: F401
loadtxt,
savetxt,
uconcatenate,
ucross,
udot,
uhstack,
uintersect1d,
unorm,
ustack,
uunion1d,
uvstack,
unyt_array,
unyt_quantity,
allclose_units,
)
from unyt.unit_object import Unit, define_unit # NOQA: F401
from unyt.unit_registry import UnitRegistry # NOQA: F401
from unyt.unit_systems import UnitSystem # NOQA: F401
from unyt.testing import assert_allclose_units # NOQA: F401
from unyt.dimensions import accepts, returns # NOQA: F401
try:
from unyt.mpl_interface import matplotlib_support # NOQA: F401
except ImportError:
pass
else:
matplotlib_support = matplotlib_support()
# function to only import quantities into this namespace
# we go through the trouble of doing this instead of "import *"
# to avoid including extraneous variables (e.g. floating point
# constants used to *construct* a physical constant) in this namespace
def import_units(module, namespace):
"""Import Unit objects from a module into a namespace"""
for key, value in module.__dict__.items():
if isinstance(value, (unyt_quantity, Unit)):
namespace[key] = value
import_units(unit_symbols, globals())
import_units(physical_constants, globals())
del import_units
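# Added note: after the two import_units calls above, unit symbols and physical
# constants are available directly from the top-level namespace, e.g. the
# module docstring's `from unyt import km, clight; print((km/clight).to('ns'))`.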
__version__ = get_versions()["version"]
del get_versions
def test(): # pragma: no cover
"""Execute the unit tests on an installed copy of unyt.
Note that this function requires pytest to run. If pytest is not
installed this function will raise ImportError.
"""
import pytest
import os
pytest.main([os.path.dirname(os.path.abspath(__file__))])
| bsd-3-clause |
heli522/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
GarmanGroup/RABDAM | tests/test_bnet_calculation.py | 1 | 8856 |
# RABDAM
# Copyright (C) 2020 Garman Group, University of Oxford
# This file is part of RABDAM.
# RABDAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# RABDAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# An outer layer to the pipeline scripts. Depending upon the flags specified
# in the command line input, this script will run either the complete / a
# subsection of the pipeline.
# python -m unittest tests/test_bnet_calculation.py
import os
import unittest
from rabdam.Subroutines.CalculateBDamage import rabdam
class TestClass(unittest.TestCase):
def test_bnet_values(self):
"""
Checks that RABDAM calculates expected Bnet values for a selection of
PDB entries
"""
import os
import requests
import shutil
import pandas as pd
exp_bnet_dict = {'2O2X': 3.300580966,
'4EZF': 3.193514624,
'4MWU': 3.185476349,
'4MOV': 3.144130191,
'3NBM': 3.141821366,
'1GW1': 3.105626889,
'4EWE': 3.08241654,
'3F1P': 3.060628186,
'3IV0': 3.054440912,
'4ZWV': 3.017330004,
'1T2I': 3.004830448,
'3LX3': 2.962424378,
'5P4N': 2.916582486,
'5MAA': 2.91219352,
'1E73': 2.850203561,
'1YKI': 2.797739814,
'4WA4': 2.720540993,
'3V2J': 2.669599635,
'3CUI': 2.666605946,
'4XLA': 2.624366813,
'4DUK': 2.854175949,
'3V38': 2.500984382,
'1VJF': 2.496374854,
'5IO2': 2.467587911,
'5CM7': 2.44869046,
'2EHU': 2.448290431,
'5JOW': 2.439619791,
'2C54': 2.379224017,
'4GZK': 2.349526276,
'2NUM': 2.326904729,
'5FYO': 2.319618192,
'4ODK': 2.304354685,
'6EV4': 2.302433369,
'5P5U': 2.288966997,
'3VHV': 2.285877338,
'4JCK': 2.27150332,
'5EKM': 2.258574341,
'3H4O': 2.231817033,
'5JIG': 2.247664542,
'2H5S': 2.206850226,
'4M5I': 2.169405117,
'1Y59': 2.138787261,
'4C45': 2.131256276,
'5F90': 2.11287042,
'4NI3': 2.088735516,
'4Z6N': 2.083743584,
'5M2G': 2.06566475,
'5ER6': 2.05707889,
'4R0X': 2.006996308,
'5LLG': 1.981501196,
'1FCX': 1.976990791,
'5M90': 1.96542442,
'3NJK': 1.955577757,
'5CWG': 1.949818624,
'2P7O': 1.921138477,
'5SZC': 1.962633169,
'2I0K': 1.901555841,
'4RDK': 1.886900766,
'5MA0': 1.877853781,
'4C1E': 1.877575448,
'5EJ3': 1.875439995,
'2WUG': 1.87334953,
'4MPY': 1.842338963,
'4OTZ': 1.835716553,
'4IOO': 1.828349113,
'4Z6O': 1.800528596,
'4ZOT': 1.799163077,
'5PHB': 1.783879628,
'3UJC': 1.747894856,
'4FR8': 1.738876799,
'5PH8': 1.736825591,
'5UPM': 1.736663507,
'3MWX': 1.733132746,
'4KDX': 1.729650659,
'3WH5': 1.717975404,
'4P04': 1.714107945,
'5Y90': 1.695283923,
'4H31': 1.674014779,
'5HJE': 1.662869176,
'4YKK': 1.653894709,
'1Q0F': 1.646880018,
'5JP6': 1.629246723,
'1X7Y': 1.618817315,
'4ZC8': 1.60606196,
'5EPE': 1.604407869,
'4ZS9': 1.582398487,
'5VNX': 1.543824945,
'5IHV': 1.542271159,
'5J90': 1.526469901,
'4K6W': 1.520316883,
'3PBC': 1.512738972,
'5CMB': 1.504620762,
'4PSC': 1.491796934,
'5UPN': 1.477252783,
'4XLZ': 1.473298738,
'4XGY': 1.465885549,
'5M4G': 1.400219288,
'3A54': 1.319587779}
if not os.path.isdir('tests/temp_files/'):
os.mkdir('tests/temp_files/')
for code, exp_bnet in exp_bnet_dict.items():
# Checks cif file
cif_text = requests.get('https://files.rcsb.org/view/%s.cif' % code)
with open('tests/temp_files/%s.cif' % code, 'w') as f:
f.write(cif_text.text)
rabdam_run = rabdam(
pathToInput='%s/tests/temp_files/%s.cif' % (os.getcwd(), code),
outputDir='%s/tests/temp_files/' % os.getcwd(),
batchRun=True,
overwrite=True,
PDT=7,
windowSize=0.02,
protOrNA='protein',
HETATM=False,
removeAtoms=[],
addAtoms=[],
highlightAtoms=[],
createOrigpdb=False,
createAUpdb=False,
createUCpdb=False,
createAUCpdb=False,
createTApdb=False
)
rabdam_run.rabdam_dataframe(test=True)
rabdam_run.rabdam_analysis(
output_options=['csv', 'pdb', 'cif', 'kde', 'bnet', 'summary']
)
bnet_df = pd.read_pickle('tests/temp_files/Logfiles/Bnet_protein.pkl')
act_bnet_cif = bnet_df['Bnet'].tolist()[-1]
self.assertEqual(round(exp_bnet, 7), round(act_bnet_cif, 7))
os.remove('tests/temp_files/%s.cif' % code)
os.remove('tests/temp_files/Logfiles/Bnet_protein.pkl')
# Checks PDB file
pdb_text = requests.get('https://files.rcsb.org/view/%s.pdb' % code)
with open('tests/temp_files/%s.pdb' % code, 'w') as f:
f.write(pdb_text.text)
rabdam_run = rabdam(
pathToInput='%s/tests/temp_files/%s.pdb' % (os.getcwd(), code),
outputDir='%s/tests/temp_files/' % os.getcwd(),
batchRun=True,
overwrite=True,
PDT=7,
windowSize=0.02,
protOrNA='protein',
HETATM=False,
removeAtoms=[],
addAtoms=[],
highlightAtoms=[],
createOrigpdb=False,
createAUpdb=False,
createUCpdb=False,
createAUCpdb=False,
createTApdb=False
)
rabdam_run.rabdam_dataframe(test=True)
rabdam_run.rabdam_analysis(
output_options=['csv', 'pdb', 'cif', 'kde', 'bnet', 'summary']
)
bnet_df = pd.read_pickle(
'%s/tests/temp_files/Logfiles/Bnet_protein.pkl' % os.getcwd()
)
act_bnet_pdb = bnet_df['Bnet'].tolist()[-1]
self.assertEqual(round(exp_bnet, 7), round(act_bnet_pdb, 7))
os.remove('tests/temp_files/%s.pdb' % code)
os.remove('tests/temp_files/Logfiles/Bnet_protein.pkl')
shutil.rmtree('tests/temp_files/')
| lgpl-3.0 |
saketkc/hatex | 2019_Spring/CSCI-572/HW05/CSCI572_HW5/dash_app.py | 1 | 4154 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table as dt
import solr
import pandas as pd
CRAWL_DATA_DIR = "/media/rna/yahoo_crawl_data/Yahoo-20190406T235503Z-001/Yahoo/yahoo"
CRAWL_CSV_MAP = "/media/rna/yahoo_crawl_data/Yahoo-20190406T235503Z-001/Yahoo/URLtoHTML_yahoo_news.csv"
SOLR_CONNECTION_URL = "http://nucleus.usc.edu:8983/solr/myexample"
FILENAME_TO_URL_DF = pd.read_csv(CRAWL_CSV_MAP)
COLUMNS = ["Title", "URL", "ID", "Description"]
SOLR = solr.SolrConnection(SOLR_CONNECTION_URL)
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
def Table(dataframe):
rows = []
for i in range(len(dataframe)):
row = []
for col in dataframe.columns:
value = dataframe.iloc[i][col]
# update this depending on which
# columns you want to show links for
# and what you want those links to be
if col == "URL" or col == "Title":
cell = html.Td(html.A(href=value, children=value))
else:
cell = html.Td(children=value)
row.append(cell)
rows.append(html.Tr(row))
return html.Table(
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])]
+ rows
)
# app = dash.Dash()
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/dZVMbK.css"})
app.layout = html.Div(
children=[
html.H1(children="Yahoo Solr Client"),
# html.Div(
# children="""
# Yahoo Solr Client.
# """
# ),
html.Label("Search term: "),
dcc.Input(id="search-term", value="", type="text"),
dcc.Dropdown(
id="search-option",
options=[
{"label": "Lucene (Solr Default)", "value": "solr"},
{"label": "PageRank", "value": "pageRank"},
],
value="solr",
style={"width": "50%", "font-family": "Droid Serif"},
),
html.Button(id="submit", type="submit", children="Submit", n_clicks=0),
html.Div(id="results"),
]
)
def write_search_results(
table,
search_term,
search_option="solr",
path="/media/dna/github/hatex/2019_Spring/CSCI-572/HW04/search_results",
):
table[["URL"]].to_csv(
"{}/{}.{}.tsv".format(path, search_term, search_option),
index=False,
header=True,
)
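# Added note: each query is also dumped as a one-column CSV/TSV named
# "<search term>.<search option>.tsv" under the search_results directory, e.g.
# a query for "soccer" ranked by PageRank would land in soccer.pageRank.tsv
# (the query term here is purely illustrative).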
@app.callback(
Output(component_id="results", component_property="children"),
[
Input("search-term", "value"),
Input("search-option", "value"),
Input("submit", "n_clicks"),
],
)
def update_output_div(search_term, search_option, n_clicks):
if search_option == "solr":
response = SOLR.query(search_term)
else:
response = SOLR.query(search_term.strip(), sort="pageRankFile desc")
data = []
for hit in response.results:
filename = hit["id"].replace(CRAWL_DATA_DIR, "")[1:]
url = FILENAME_TO_URL_DF[FILENAME_TO_URL_DF.filename == filename].URL.iloc[0]
url_text = url # "<a href='{0}'>{0}</a>".format(url)
try:
description = hit["description"]
except KeyError:
description = ""
record = {
"Title": hit["title"],
"ID": hit["id"],
"Description": description,
"URL": url_text,
}
data.append(record)
table = dt.DataTable(
columns=[{"name": column, "id": column} for column in COLUMNS],
id="results-table",
data=data,
style_data={"whiteSpace": "normal"},
css=[
{
"selector": ".dash-cell div.dash-cell-value",
"rule": "display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;",
}
],
)
df = pd.DataFrame(data)
write_search_results(df, search_term, search_option)
return Table(df)
if __name__ == "__main__":
app.run_server(host="nucleus.usc.edu", debug=True)
| mit |
Takonan/csc411_a3 | utils.py | 1 | 30931 | from scipy.io import loadmat
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
import theano
import yaml
from keras.models import *
def remove_mean(inputs):
""" Remove the mean of each individual images.
Assumes that the inputs is Nx1xRxC, where N is number of examples, and R,C is dimension of image
"""
# Flatten
inputs = np.reshape(inputs, (inputs.shape[0], inputs.shape[1]*inputs.shape[2]*inputs.shape[3]))
inputs_mean = inputs.mean(axis=1) # Average for each row. Dimension = N rows
inputs_mean = inputs_mean.reshape(inputs_mean.shape[0], 1) # Make inputs_mean Nx1
inputs_mean = np.tile(inputs_mean, inputs.shape[1]) # Make inputs_mean NxD
inputs -= inputs_mean
inputs = np.reshape(inputs, (inputs.shape[0], 1,32,32))
return inputs
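# Added note: remove_mean (and preprocess_images below) hard-code the
# (N, 1, 32, 32) image shape, so they only apply to the 32x32 single-channel
# face images used in this assignment.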
def zca_whitening(inputs, epsilon=0.1):
""" Assumes that the inputs is a Nx1xRxC, where N is number of examples, and R,C is dimension of image
epsilon is Whitening constant, it prevents division by zero.
Output: ZCAMatrix. Multiply this with the inputs (shape(1,dimension))
Based on: http://ufldl.stanford.edu/wiki/index.php/Implementing_PCA/Whitening
http://stackoverflow.com/questions/31528800/how-to-implement-zca-whitening-python
"""
inputs = np.reshape(inputs, (inputs.shape[0], inputs.shape[1]*inputs.shape[2]*inputs.shape[3]))
    sigma = np.dot(inputs.T, inputs)/inputs.shape[0] # Covariance matrix (assumes zero-mean data)
U,S,V = np.linalg.svd(sigma) #Singular Value Decomposition
# print sigma.shape
# print U.shape
# print S.shape
# print V.shape
# print np.diag(1.0/np.sqrt(S + epsilon)).shape
ZCAMatrix = np.dot(np.dot(U, np.diag(1.0/np.sqrt(S + epsilon))), U.T) #ZCA Whitening matrix
return ZCAMatrix #Data whitening
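# Illustrative use, consistent with the docstring above (variable names are
# hypothetical):
#   ZCAMatrix = zca_whitening(inputs)   # inputs of shape (N, 1, 32, 32)
#   whitened = np.dot(inputs.reshape(N, 1024), ZCAMatrix)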
def preprocess_images(inputs):
""" Assumes input is a N X D (ex N x 1024) array of image data.
Applies:
- Reshape for CNN (N X 1 X 32 x 32)
- Convert to FloatX
- Normalizing to 0-1 range (/255)
- Remove mean for each example
Returns a N X 1 X 32 X 32 array
"""
inputs = inputs.reshape(inputs.shape[0], 1, 32,32) # For CNN model
inputs = inputs.astype(theano.config.floatX)
print "Normalizing to 0-1 range for input..."
inputs /= 255
print "Removing mean for each example in input..."
inputs = remove_mean(inputs)
return inputs
def save_output_csv(filename, pred):
"""Save the prediction in a filename."""
with open(filename, 'w') as f_result:
f_result.write('Id,Prediction\n')
for i, y in enumerate(pred, 1):
f_result.write('{},{}\n'.format(i,y))
num_entries = 1253
for i in range(len(pred)+1, num_entries+1):
f_result.write('{},{}\n'.format(i,0))
return
def region_std(source,rec_size):
# Compute the standard deviation in the 2*rec_size + 1 x 2*rec_size+1 square around each pixel
row, col = source.shape
output = np.zeros(source.shape)
for r in range(row):
for c in range(col):
output[r,c] = np.std(source[max(0,r-rec_size):min(row,r+rec_size+1),max(0,c-rec_size):min(col,c+rec_size+1)])
return output
def ShowMeans(means):
"""Show the cluster centers as images."""
plt.figure() # HC: Removed '1' inside figure so it creates a new figure each time.
plt.clf()
for i in xrange(means.shape[0]):
plt.subplot(1, means.shape[0], i+1)
plt.imshow(means[i,0,:,:], cmap=plt.cm.gray)
plt.draw()
plt.show() # HC: Added show line
# raw_input('Press Enter.')
def load_public_test():
""" Loads the labeled data images, targets, and identities (sorted).
"""
# Load the public test set
data = loadmat('public_test_images.mat')
images = data['public_test_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
# Load the hidden test set
data = loadmat('hidden_test_images.mat')
# Append the hidden test set to the public test set
images = np.append(images, data['hidden_test_images'].T, axis=0)
inputs = np.transpose(images, (0, 2, 1))
inputs = inputs[:, :, ::-1]
inputs = inputs.reshape(images.shape[0], images.shape[1]*images.shape[2])
return inputs
def load_data_with_identity(include_mirror=False):
""" Loads the labeled data images, targets, and identities (sorted).
"""
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
targets = targets_sort[:,0]
# Sort the images
temp = np.append(images,identities,1)
inputs_sort = temp[temp[:,-1].argsort()]
inputs = inputs_sort[:,0:-1]
# Return sorted identities:
identities = inputs_sort[:,-1]
return inputs, targets, identities
def load_data_with_identity_uniform(include_mirror=False):
""" Loads the labeled data images, targets, and identities (sorted).
Makes sure that the number of examples for each label is somewhat similar
by deleting 4's and 7's target examples before returning. Keeps the first ~315 examples.
"""
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Get the images into the desired form: num_examples x 32 x 32 (and face is rotated in proper orientation)
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# Sort the array based on the tr_labels
# Sort the identities
temp = np.append(identities,targets,1)
identities_sort = temp[temp[:,1].argsort()]
identities = identities_sort[:,0]
# Sort the images
temp = np.append(images,targets,1)
images_sort = temp[temp[:,-1].argsort()]
images = images_sort[:,0:-1]
# Return sorted targets:
targets = images_sort[:,-1]
# Throw away some of the ones where the labels are 4 and 7
indices_4 = np.where(targets == 4) # Index values where targets[indices_4] == 4
start_index = 316 # Start index of where to delete the 4's
end_index = indices_4[0][-1] + 1
targets = np.delete(targets, indices_4[0][start_index:end_index])
images = np.delete(images, indices_4[0][start_index:end_index], axis=0) # Delete the rows specified by indices_4[0][start:end]
identities = np.delete(identities, indices_4[0][start_index:end_index])
# Throw away some of the 7's
    indices_7 = np.where(targets == 7) # Index values where targets[indices_7] == 7
    start_index = 316 # Start index of where to delete the 7's
end_index = indices_7[0][-1] + 1
targets = np.delete(targets, indices_7[0][start_index:end_index])
images = np.delete(images, indices_7[0][start_index:end_index], axis=0)
identities = np.delete(identities, indices_7[0][start_index:end_index])
print "Images shape: ", images.shape
print "Targets shape: ", targets.shape
print "identities shape: ", identities.shape
# Generate a mirrored version if necessary:
if include_mirror:
# Unflatten the images
images = images.reshape(images.shape[0], 32, 32)
# Created mirrored instances
mirrored_faces = images[:,:,::-1]
images = np.append(images, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
return images, targets, identities
def reload_data_with_identity_normalized():
""" Reloads the normalized data. Include mirror.
"""
inputs = np.load('labeled_inputs_normalized.npy')
targets = np.load('labeled_targets_normalized.npy')
identities = np.load('labeled_identities_normalized.npy')
return inputs, targets, identities
def load_data_with_identity_normalized(include_mirror=False):
""" Loads the labeled data images, targets, and identities (sorted)
    and normalizes the intensities of each image.
"""
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Preprocess the data to normalize intensities
for i in range(images.shape[0]):
filt = np.array([[1,2,1],[2,4,2],[1,2,1]])
gaussian = filt.astype(float)/filt.sum()
gaussian_filter = signal.convolve2d(images[i,:,:], gaussian, boundary='symm', mode='same')
std_filter = region_std(images[i,:,:],1)
final = (images[i,:,:] - gaussian_filter).astype(float)/std_filter
final = (((final/np.amax(final))+1)*128).astype(int)
images[i,:,:] = final[:,:]
print 'Done image ', i
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
targets = targets_sort[:,0]
# Sort the images
temp = np.append(images,identities,1)
inputs_sort = temp[temp[:,-1].argsort()]
inputs = inputs_sort[:,0:-1]
# Return sorted identities:
identities = inputs_sort[:,-1]
outfile = open('labeled_inputs_normalized.npy','w')
np.save(outfile,inputs)
outfile.close()
outfile = open('labeled_targets_normalized.npy','w')
np.save(outfile,targets)
outfile.close()
outfile = open('labeled_identities_normalized.npy','w')
np.save(outfile,identities)
outfile.close()
return inputs, targets, identities
def load_data(include_mirror=False):
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# targets = np.squeeze(data['tr_labels'])
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
num_unlabeled = sum(targets_sort[:,1] == -1)
valid_targets = targets_sort[0:num_unlabeled,0]
    train_targets = targets_sort[num_unlabeled:,0]
# Sort the images
temp = np.append(images,identities,1)
images_sort = temp[temp[:,-1].argsort()]
valid_inputs = images_sort[0:num_unlabeled,0:-1] # Tested that the -1 omits the identity last column
    train_inputs = images_sort[num_unlabeled:,0:-1]
return train_inputs, train_targets, valid_inputs, valid_targets
def load_data_with_test(include_mirror=False):
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# targets = np.squeeze(data['tr_labels'])
# # Sort the array based on the tr_identities
# # Sort the targets
# temp = np.append(targets,identities,1)
# targets_sort = temp[temp[:,1].argsort()]
# num_unlabeled = sum(targets_sort[:,1] == -1)
# valid_targets = targets_sort[0:num_unlabeled,0]
# test_targets = targets_sort[(num_unlabeled + 1):2*num_unlabeled,0]
# train_targets = targets_sort[(2*num_unlabeled + 1):,0]
# # Sort the images
# temp = np.append(images,identities,1)
# images_sort = temp[temp[:,-1].argsort()]
# valid_inputs = images_sort[0:num_unlabeled,0:-1] # Tested that the -1 omits the identity last column
# test_inputs = images_sort[(num_unlabeled + 1):2*num_unlabeled,0:-1]
# train_inputs = images_sort[(2*num_unlabeled + 1):,0:-1]
# DEBUG: Only use half of the unlabeled identity for validation
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
num_unlabeled = sum(targets_sort[:,1] == -1)
valid_targets = targets_sort[0:num_unlabeled/2,0]
    test_targets = targets_sort[num_unlabeled/2:num_unlabeled,0]
    train_targets = targets_sort[num_unlabeled:,0]
# Sort the images
temp = np.append(images,identities,1)
images_sort = temp[temp[:,-1].argsort()]
valid_inputs = images_sort[0:num_unlabeled/2,0:-1] # Tested that the -1 omits the identity last column
    test_inputs = images_sort[num_unlabeled/2:num_unlabeled,0:-1]
    train_inputs = images_sort[num_unlabeled:,0:-1]
# End debug
return train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets
def reload_data_with_test_normalized():
    # Reload the previously saved train/validation/test splits with normalized intensities
train_inputs = np.load('train_inputs.npy')
train_targets = np.load('train_targets.npy')
valid_inputs = np.load('valid_inputs.npy')
valid_targets = np.load('valid_targets.npy')
test_inputs = np.load('test_inputs.npy')
test_targets = np.load('test_targets.npy')
return train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets
def load_data_with_test_normalized(include_mirror=False):
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Preprocess the data to normalize intensities
for i in range(images.shape[0]):
# ShowMeans(images[i:(i+1)])
# print images[i,:,:]
# print "ith image shape: ", images[i,:,:].shape
filt = np.array([[1,2,1],[2,4,2],[1,2,1]])
gaussian = filt.astype(float)/filt.sum()
gaussian_filter = signal.convolve2d(images[i,:,:], gaussian, boundary='symm', mode='same')
# print "Gaussian filter:"
# print gaussian_filter
std_filter = region_std(images[i,:,:],1)
# print "std_filter:"
# print std_filter
final = (images[i,:,:] - gaussian_filter).astype(float)/std_filter
# print "Final:"
# print final
# print 'final image shape: ', final.shape
# print "Largest and smallest value in final:", np.amax(final), np.amin(final)
final = (((final/np.amax(final))+1)*128).astype(int)
images[i,:,:] = final[:,:]
# print images[i,:,:]
# # Output STD
# output_std = np.zeros((1,32,32))
# output_std[0,:,:] = std_filter[:,:]
# # print output_std.shape
# ShowMeans(output_std)
# # Output gaussian:
# output = np.zeros((1,32,32))
# output[0,:,:] = gaussian_filter[:,:]
# ShowMeans(output)
# ShowMeans(images[i:(i+1)])
print 'Done image ', i
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# targets = np.squeeze(data['tr_labels'])
# # Sort the array based on the tr_identities
# # Sort the targets
# temp = np.append(targets,identities,1)
# targets_sort = temp[temp[:,1].argsort()]
# num_unlabeled = sum(targets_sort[:,1] == -1)
# valid_targets = targets_sort[0:num_unlabeled,0]
# test_targets = targets_sort[(num_unlabeled + 1):2*num_unlabeled,0]
# train_targets = targets_sort[(2*num_unlabeled + 1):,0]
# # Sort the images
# temp = np.append(images,identities,1)
# images_sort = temp[temp[:,-1].argsort()]
# valid_inputs = images_sort[0:num_unlabeled,0:-1] # Tested that the -1 omits the identity last column
# test_inputs = images_sort[(num_unlabeled + 1):2*num_unlabeled,0:-1]
# train_inputs = images_sort[(2*num_unlabeled + 1):,0:-1]
# DEBUG: Only use half of the unlabeled identity for validation
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
num_unlabeled = sum(targets_sort[:,1] == -1)
valid_targets = targets_sort[0:num_unlabeled/2,0]
    test_targets = targets_sort[num_unlabeled/2:num_unlabeled,0]
    train_targets = targets_sort[num_unlabeled:,0]
# Sort the images
temp = np.append(images,identities,1)
images_sort = temp[temp[:,-1].argsort()]
valid_inputs = images_sort[0:num_unlabeled/2,0:-1] # Tested that the -1 omits the identity last column
    test_inputs = images_sort[num_unlabeled/2:num_unlabeled,0:-1]
    train_inputs = images_sort[num_unlabeled:,0:-1]
# End debug
return train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets
def load_data_with_test_32x32(include_mirror=False):
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# targets = np.squeeze(data['tr_labels'])
# # Sort the array based on the tr_identities
# # Sort the targets
# temp = np.append(targets,identities,1)
# targets_sort = temp[temp[:,1].argsort()]
# num_unlabeled = sum(targets_sort[:,1] == -1)
# valid_targets = targets_sort[0:num_unlabeled,0]
# test_targets = targets_sort[(num_unlabeled + 1):2*num_unlabeled,0]
# train_targets = targets_sort[(2*num_unlabeled + 1):,0]
# # Sort the images
# temp = np.append(images,identities,1)
# images_sort = temp[temp[:,-1].argsort()]
# valid_inputs = images_sort[0:num_unlabeled,0:-1] # Tested that the -1 omits the identity last column
# test_inputs = images_sort[(num_unlabeled + 1):2*num_unlabeled,0:-1]
# train_inputs = images_sort[(2*num_unlabeled + 1):,0:-1]
# DEBUG: Only use half of the unlabeled identity for validation
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
num_unlabeled = sum(targets_sort[:,1] == -1)
valid_targets = targets_sort[0:num_unlabeled/2,0]
    test_targets = targets_sort[num_unlabeled/2:num_unlabeled,0]
    train_targets = targets_sort[num_unlabeled:,0]
# Sort the images
temp = np.append(images,identities,1)
images_sort = temp[temp[:,-1].argsort()]
print data['tr_images'].T.shape[1], data['tr_images'].T.shape[2]
images_sort = images_sort[:,0:-1].reshape(images.shape[0], data['tr_images'].T.shape[1], data['tr_images'].T.shape[2])
valid_inputs = images_sort[0:num_unlabeled/2,:,:] # Tested that the -1 omits the identity last column
    test_inputs = images_sort[num_unlabeled/2:num_unlabeled,:,:]
    train_inputs = images_sort[num_unlabeled:,:,:]
# End debug
return train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets
def load_data_one_of_k(include_mirror=False):
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# targets = np.squeeze(data['tr_labels'])
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
num_unlabeled = sum(targets_sort[:,1] == -1)
temp = np.zeros((temp.shape[0], 7))
for i in range(temp.shape[0]):
temp[i][targets_sort[i,0]-1] = 1
valid_targets = temp[0:num_unlabeled,:]
    train_targets = temp[num_unlabeled:,:]
# Sort the images
temp = np.append(images,identities,1)
images_sort = temp[temp[:,-1].argsort()]
valid_inputs = images_sort[0:num_unlabeled,0:-1] # Tested that the -1 omits the identity last column
    train_inputs = images_sort[num_unlabeled:,0:-1]
return train_inputs, train_targets, valid_inputs, valid_targets
def load_data_with_test_one_of_k(include_mirror=False):
data = loadmat('labeled_images.mat')
images = data['tr_images'].T # Transpose so the number of images is in first dimension: 2925, 32, 32
targets = data['tr_labels']
identities = data['tr_identity']
# Generate a mirrored version if necessary:
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
identities = np.append(identities, identities,0)
targets = np.append(targets, targets,0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten the 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
# targets = np.squeeze(data['tr_labels'])
# DEBUG: Only use half of the unlabeled identity for validation
# Sort the array based on the tr_identities
# Sort the targets
temp = np.append(targets,identities,1)
targets_sort = temp[temp[:,1].argsort()]
num_unlabeled = sum(targets_sort[:,1] == -1)
temp = np.zeros((temp.shape[0], 7))
for i in range(temp.shape[0]):
temp[i][targets_sort[i,0]-1] = 1
valid_targets = temp[0:num_unlabeled/2,:]
    test_targets = temp[num_unlabeled/2:num_unlabeled,:]
    train_targets = temp[num_unlabeled:,:]
# Sort the images
temp = np.append(images,identities,1)
images_sort = temp[temp[:,-1].argsort()]
valid_inputs = images_sort[0:num_unlabeled/2,0:-1] # Tested that the -1 omits the identity last column
    test_inputs = images_sort[num_unlabeled/2:num_unlabeled,0:-1]
    train_inputs = images_sort[num_unlabeled:,0:-1]
# End debug
return train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets
def load_unlabeled_data(include_mirror=False):
data = loadmat('unlabeled_images.mat')
images = data['unlabeled_images'].T
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Flatten 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
return images
def load_unlabeled_data_normalized(include_mirror=False):
data = loadmat('unlabeled_images.mat')
images = data['unlabeled_images'].T
if include_mirror:
mirrored_faces = np.transpose(images, (0,2,1))
rotated_faces = mirrored_faces[:,:,::-1]
images = np.append(rotated_faces, mirrored_faces, 0)
else:
images = np.transpose(images, (0,2,1))[:,:,::-1]
# Preprocess the data to normalize intensities
for i in range(images.shape[0]):
filt = np.array([[1,4,7,4,1],[4,16,26,16,4],[7,26,41,26,7],[4,16,26,16,4],[1,4,7,4,1]])
gaussian = filt.astype(float)/filt.sum()
gaussian_filter = signal.convolve2d(images[i,:,:], gaussian, boundary='symm', mode='same')
std_filter = region_std(images[i,:,:],2)
final = (images[i,:,:] - gaussian_filter).astype(float)/std_filter
final = (((final/np.amax(final))+1)*128).astype(int)
images[i,:,:] = final[:,:]
print 'Done image ', i
# Flatten 32x32 to 1024 1D
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
outfile = open('unlabeled_normalized.npy','w')
np.save(outfile,images)
outfile.close()
return images
def reload_unlabeled_data_normalized(include_mirror=False):
# Just load from the data that's been pre-processed
images = np.load('unlabeled_normalized.npy')
return images
def NN_bag_predict_unlabeled(model_checkpoint='model.yaml', weights_checkpoint='NNweights_', num_models=8, useZCA=True):
    # Predict labels for the public/hidden test images using a majority vote over the NNs from k-fold cross-validation; the first fold is left out because of its low performance.
model_stream = file(model_checkpoint, 'r')
test_model = model_from_yaml(yaml.safe_load(model_stream))
# Load and preprocess test set
x_test = load_public_test()
x_test = x_test.reshape(x_test.shape[0], 1, 32, 32)
x_test = preprocess_images(x_test)
if useZCA:
ZCAMatrix = np.load('ZCAMatrix.npy')
x_test = np.dot(x_test.reshape(x_test.shape[0],x_test.shape[1]*x_test.shape[2]*x_test.shape[3]),ZCAMatrix)
x_test = x_test.reshape(x_test.shape[0], 1, 32,32)
print "Processed test input with ZCAMatrix"
print "Finished loading test model"
predictions = np.zeros((x_test.shape[0], num_models), dtype=int)
agg_pred = np.zeros((x_test.shape[0], ), dtype=int)
for i in np.arange(1, num_models):
test_model.load_weights(weights_checkpoint + "{:d}.h5".format(i))
predictions[:, i] = (test_model.predict_classes(x_test) + 1).astype(int)
predictions = predictions.astype(int)
print predictions
for i in np.arange(x_test.shape[0]):
        # Column 0 of `predictions` is never filled (the first fold is skipped above), so exclude it from the vote
        agg_pred[i] += np.argmax(np.bincount(predictions[i, 1:]))
print agg_pred
save_output_csv("bagged_CNN_test_predictions.csv", agg_pred)
return
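# The majority vote above works because np.bincount counts how many models predicted each
# class id and np.argmax picks the most frequent one, e.g. (worked by hand):
#   np.argmax(np.bincount(np.array([3, 2, 3, 7, 3, 2, 5])))  # -> 3 (three votes for class 3)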
def plot_training_loss_accuracy(data='training_stats.npy'):
training_score = np.load(data)
non_zero_indices = np.nonzero(training_score[:,0])[0]
start_index = non_zero_indices[0]
end_index = non_zero_indices[-1]
    nb_epoch = end_index - start_index # Number of recorded epochs (not training_score.shape[0] anymore)
plt.plot(xrange(nb_epoch), training_score[start_index:end_index,0], label='Training loss')
plt.plot(xrange(nb_epoch), training_score[start_index:end_index,2], label='Validation loss')
plt.legend()
plt.title('Training and Validation Cross Entropy Loss Vs. Epoch')
plt.xlabel('Number of epochs')
plt.ylabel('Cross entropy loss')
plt.savefig('train_loss_vs_epoch.png', dpi=200)
plt.figure()
plt.plot(xrange(nb_epoch), training_score[start_index:end_index,1], label='Training accuracy')
plt.plot(xrange(nb_epoch), training_score[start_index:end_index,3], label='Validation accuracy')
plt.legend(loc=4)
plt.title('Training and Validation Accuracy Vs. Epoch')
plt.xlabel('Number of epochs')
plt.ylabel('Accuracy')
plt.savefig('train_acc_vs_epoch.png', dpi=200)
def plot_kfold_validation_accuracy(data='fold_val_acc.npy'):
mean_acc_vec = np.zeros((4, 2))
max_acc_vec = np.zeros((4, 2))
i = 0
for x in xrange(3, 11, 2):
print '{:d}fold_val_acc.npy'.format(x)
fold_acc = np.load('{:d}fold_val_acc.npy'.format(x))
mean_acc_vec[i][0] = x
max_acc_vec[i][0] = x
mean_acc_vec[i][1] = fold_acc.mean()
max_acc_vec[i][1] = fold_acc.max()
print fold_acc.mean()
print fold_acc.max()
i += 1
print mean_acc_vec
print max_acc_vec
    plt.figure()
plt.plot(mean_acc_vec[:,0], mean_acc_vec[:,1], label='Avg Validation accuracy')
plt.plot(max_acc_vec[:,0], max_acc_vec[:,1], label='Max Validation accuracy')
plt.legend(loc=4)
plt.title('Validation Accuracy Vs Number of Folds')
plt.xlabel('Number of folds')
plt.ylabel('Validation accuracy')
plt.savefig('train_acc_vs_fold.png', dpi=200)
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/metrics/classification.py | 1 | 91448 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# Shangwu Yao <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = {type_true, type_pred}
if y_type == {"binary", "multiclass"}:
y_type = {"multiclass"}
if len(y_type) > 1:
raise ValueError("Classification metrics can't handle a mix of {0} "
"and {1} targets".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
unique_values = np.union1d(y_true, y_pred)
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
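# Illustrative call, worked out from the logic above (not a doctest from the original
# module): a binary y_pred is promoted to match a multiclass y_true, e.g.
#   _check_targets([1, 2, 3], [1, 1, 1])
#   -> ('multiclass', array([1, 2, 3]), array([1, 1, 1]))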
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly
classified samples (float), else returns the number of correctly
classified samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_score`` function.
Examples
--------
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes)
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
n_labels = labels.size
label_to_ind = {y: x for x, y in enumerate(labels)}
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {'i', 'u', 'b'}:
dtype = np.int64
else:
dtype = np.float64
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels), dtype=dtype,
).toarray()
return CM
def multilabel_confusion_matrix(y_true, y_pred, sample_weight=None,
labels=None, samplewise=False):
"""Compute a confusion matrix for each class or sample
.. versionadded:: 0.21
Compute class-wise (default) or sample-wise (samplewise=True) multilabel
confusion matrix to evaluate the accuracy of a classification, and output
confusion matrices for each class or sample.
In multilabel confusion matrix :math:`MCM`, the count of true negatives
is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
true positives is :math:`MCM_{:,1,1}` and false positives is
:math:`MCM_{:,0,1}`.
Multiclass data will be treated as if binarized under a one-vs-rest
transformation. Returned confusion matrices will be in the order of
sorted unique labels in the union of (y_true, y_pred).
Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
of shape (n_samples, n_outputs) or (n_samples,)
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
of shape (n_samples, n_outputs) or (n_samples,)
Estimated targets as returned by a classifier
sample_weight : array-like of shape = (n_samples,), optional
Sample weights
labels : array-like
A list of classes or column indices to select some (or to force
inclusion of classes absent from the data)
samplewise : bool, default=False
In the multilabel case, this calculates a confusion matrix per sample
Returns
-------
multi_confusion : array, shape (n_outputs, 2, 2)
A 2x2 confusion matrix corresponding to each output in the input.
When calculating class-wise multi_confusion (default), then
n_outputs = n_labels; when calculating sample-wise multi_confusion
(samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
the results will be returned in the order specified in ``labels``,
otherwise the results will be returned in sorted order by default.
See also
--------
confusion_matrix
Notes
-----
The multilabel_confusion_matrix calculates class-wise or sample-wise
multilabel confusion matrices, and in multiclass tasks, labels are
binarized under a one-vs-rest way; while confusion_matrix calculates
one confusion matrix for confusion between every two classes.
Examples
--------
Multilabel-indicator case:
>>> import numpy as np
>>> from sklearn.metrics import multilabel_confusion_matrix
>>> y_true = np.array([[1, 0, 1],
... [0, 1, 0]])
>>> y_pred = np.array([[1, 0, 0],
... [0, 1, 1]])
>>> multilabel_confusion_matrix(y_true, y_pred)
array([[[1, 0],
[0, 1]],
<BLANKLINE>
[[1, 0],
[0, 1]],
<BLANKLINE>
[[0, 1],
[1, 0]]])
Multiclass case:
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> multilabel_confusion_matrix(y_true, y_pred,
... labels=["ant", "bird", "cat"])
array([[[3, 1],
[0, 2]],
<BLANKLINE>
[[5, 0],
[1, 0]],
<BLANKLINE>
[[2, 1],
[1, 2]]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in ("binary", "multiclass", "multilabel-indicator"):
raise ValueError("%s is not supported" % y_type)
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
if y_true.ndim == 1:
if samplewise:
raise ValueError("Samplewise metrics are not available outside of "
"multilabel classification.")
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
else:
sum_axis = 1 if samplewise else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.array_equal(labels, present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels) for '
'multilabel targets. '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels) for '
'multilabel targets. '
'Got %d < 0' % np.min(labels))
if n_labels is not None:
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
fp = pred_sum - tp_sum
fn = true_sum - tp_sum
tp = tp_sum
if sample_weight is not None and samplewise:
sample_weight = np.array(sample_weight)
tp = np.array(tp)
fp = np.array(fp)
fn = np.array(fn)
tn = sample_weight * y_true.shape[1] - tp - fp - fn
elif sample_weight is not None:
tn = sum(sample_weight) - tp - fp - fn
elif samplewise:
tn = y_true.shape[1] - tp - fp - fn
else:
tn = y_true.shape[0] - tp - fp - fn
return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
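# A small sketch (assumed usage, not from the original module) of how the per-class 2x2
# matrices returned above are typically unpacked, as jaccard_score does further below:
#   MCM = multilabel_confusion_matrix(y_true, y_pred)
#   tn, fp, fn, tp = MCM[:, 0, 0], MCM[:, 0, 1], MCM[:, 1, 0], MCM[:, 1, 1]
#   precision = tp / (tp + fp)   # per-class precision; beware of zero denominators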
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
r"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
        Weighting type to calculate the score. None means no weighting;
"linear" means linear weighted; "quadratic" means quadratic weighted.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<https://www.mitpressjournals.org/doi/pdf/10.1162/coli.07-034-R2>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels,
sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
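# A hand-worked example of the unweighted statistic computed above (not a doctest from
# the original module). For y1 = [0, 1, 0, 1] and y2 = [0, 1, 1, 1]: the observed
# agreement is p_o = 3/4, the expected agreement is p_e = 0.5*0.25 + 0.5*0.75 = 0.5,
# so kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5, which is what
#   cohen_kappa_score([0, 1, 0, 1], [0, 1, 1, 1])  # -> 0.5
# returns.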
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
.. deprecated:: 0.21
This is deprecated to be removed in 0.23, since its handling of
binary and multiclass inputs was broken. `jaccard_score` has an API
that is consistent with precision_score, f_score, etc.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
"""
warnings.warn('jaccard_similarity_score has been deprecated and replaced '
'with jaccard_score. It will be removed in version 0.23. '
'This implementation has surprising behavior for binary '
'and multiclass classification tasks.', DeprecationWarning)
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def jaccard_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float (if average is not None) or array of floats, shape =\
[n_unique_labels]
See also
--------
accuracy_score, f_score, multilabel_confusion_matrix
Notes
-----
:func:`jaccard_score` may be a poor metric if there are no
positives for some samples or classes. Jaccard is undefined if there are
no true or predicted labels, and our implementation will return a score
of 0 with a warning.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_score
>>> y_true = np.array([[0, 1, 1],
... [1, 1, 0]])
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
In the binary case:
>>> jaccard_score(y_true[0], y_pred[0])
0.6666...
In the multilabel case:
>>> jaccard_score(y_true, y_pred, average='samples')
0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
In the multiclass case:
>>> y_pred = [0, 2, 1, 2]
>>> y_true = [0, 1, 2, 2]
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
"""
labels = _check_set_wise_labels(y_true, y_pred, average, labels,
pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
labels=labels, samplewise=samplewise)
numerator = MCM[:, 1, 1]
denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]
if average == 'micro':
numerator = np.array([numerator.sum()])
denominator = np.array([denominator.sum()])
jaccard = _prf_divide(numerator, denominator, 'jaccard',
'true or predicted', average, ('jaccard',))
if average is None:
return jaccard
if average == 'weighted':
weights = MCM[:, 1, 0] + MCM[:, 1, 1]
if not np.any(weights):
# numerator is 0, and warning should have already been issued
weights = None
elif average == 'samples' and sample_weight is not None:
weights = sample_weight
else:
weights = None
return np.average(jaccard, weights=weights)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Binary and multiclass labels are supported. Only in the binary case does
this relate to information about true and false positives and negatives.
See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<https://doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
.. [3] `Gorodkin, (2004). Comparing two K-category assignments by a
K-category correlation coefficient
<https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_
.. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN
Error Measures in MultiClass Prediction
<https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred)
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in {"binary", "multiclass"}:
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
t_sum = C.sum(axis=1, dtype=np.float64)
p_sum = C.sum(axis=0, dtype=np.float64)
n_correct = np.trace(C, dtype=np.float64)
n_samples = p_sum.sum()
cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
if np.isnan(mcc):
return 0.
else:
return mcc
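# In the binary case the covariance-based expression above reduces to the familiar
# confusion-matrix form MCC = (TP*TN - FP*FN) / sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)).
# For the docstring example y_true = [+1, +1, +1, -1], y_pred = [+1, -1, +1, +1]:
#   TP = 2, TN = 0, FP = 1, FN = 1  ->  (0 - 1) / sqrt(3*3*1*1) = -1/3 ~ -0.33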
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class with weighting depending on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
See also
--------
fbeta_score, precision_recall_fscore_support, jaccard_score,
multilabel_confusion_matrix
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro')
0.26...
>>> f1_score(y_true, y_pred, average='micro')
0.33...
>>> f1_score(y_true, y_pred, average='weighted')
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
Notes
-----
When ``true positive + false positive == 0`` or
``true positive + false negative == 0``, f-score returns 0 and raises
``UndefinedMetricWarning``.
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of recall in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> +inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
See also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
array([0.71..., 0. , 0. ])
Notes
-----
When ``true positive + false positive == 0`` or
``true positive + false negative == 0``, f-score returns 0 and raises
``UndefinedMetricWarning``.
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
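# Illustrative sketch, not part of the original module: a hypothetical helper
# showing how ``beta`` shifts the F-beta score between the precision- and
# recall-leaning ends. With precision = 2/3 and recall = 1.0 on this toy data,
# beta < 1 pulls the score toward precision and beta > 1 toward recall.
def _demo_fbeta_tradeoff():
    y_true = [0, 0, 1, 1]
    y_pred = [0, 1, 1, 1]
    low_beta = fbeta_score(y_true, y_pred, beta=0.5)   # ~0.71, precision-leaning
    high_beta = fbeta_score(y_true, y_pred, beta=2.0)  # ~0.91, recall-leaning
    return low_beta, high_beta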
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
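# Illustrative sketch, not part of the original module: a hypothetical helper
# showing the zero-division behaviour of ``_prf_divide`` in isolation. The
# second denominator is zero, so the corresponding result is set to 0.0; no
# warning fires here because ``warn_for`` is empty.
def _demo_prf_divide():
    numerator = np.array([2., 0.])
    denominator = np.array([4., 0.])
    # Expected result: array([0.5, 0. ])
    return _prf_divide(numerator, denominator, 'precision', 'predicted',
                       None, warn_for=())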
def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):
"""Validation associated with set-wise metrics
Returns identified labels
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) >= 2:
raise ValueError("pos_label=%r is not a valid label: "
"%r" % (pos_label, present_labels))
labels = [pos_label]
else:
average_options = list(average_options)
if y_type == 'multiclass':
average_options.remove('samples')
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting, one of %r."
% (y_type, average_options))
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
return labels
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
Notes
-----
When ``true positive + false positive == 0``, precision is undefined;
When ``true positive + false negative == 0``, recall is undefined.
In such cases, the metric will be set to 0, as will f-score, and
``UndefinedMetricWarning`` will be raised.
"""
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels,
pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores to 0 and warn:
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
if np.isposinf(beta):
f_score = recall
else:
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
denom = beta2 * precision + recall
denom[denom == 0.] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
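# Illustrative sketch, not part of the original module: a hypothetical helper
# checking that, for multiclass input with all labels included, the micro-
# averaged precision, recall and F-score all collapse to the plain accuracy
# (2 correct out of 6 here).
def _demo_micro_average_is_accuracy():
    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='micro')
    return p, r, f   # each ~0.333..., i.e. the accuracy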
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
See also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro')
0.22...
>>> precision_score(y_true, y_pred, average='micro')
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
0.22...
>>> precision_score(y_true, y_pred, average=None)
array([0.66..., 0. , 0. ])
Notes
-----
When ``true positive + false positive == 0``, precision returns 0 and
raises ``UndefinedMetricWarning``.
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
See also
--------
precision_recall_fscore_support, balanced_accuracy_score,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro')
0.33...
>>> recall_score(y_true, y_pred, average='micro')
0.33...
>>> recall_score(y_true, y_pred, average='weighted')
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([1., 0., 0.])
Notes
-----
When ``true positive + false negative == 0``, recall returns 0 and raises
``UndefinedMetricWarning``.
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
"""Compute the balanced accuracy
    The balanced accuracy in binary and multiclass classification problems is
    used to deal with imbalanced datasets. It is defined as the average of
    recall obtained on each class.
The best value is 1 and the worst value is 0 when ``adjusted=False``.
Read more in the :ref:`User Guide <balanced_accuracy_score>`.
Parameters
----------
y_true : 1d array-like
Ground truth (correct) target values.
y_pred : 1d array-like
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
adjusted : bool, default=False
When true, the result is adjusted for chance, so that random
performance would score 0, and perfect performance scores 1.
Returns
-------
balanced_accuracy : float
See also
--------
recall_score, roc_auc_score
Notes
-----
Some literature promotes alternative definitions of balanced accuracy. Our
definition is equivalent to :func:`accuracy_score` with class-balanced
sample weights, and shares desirable properties with the binary case.
See the :ref:`User Guide <balanced_accuracy_score>`.
References
----------
.. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010).
The balanced accuracy and its posterior distribution.
Proceedings of the 20th International Conference on Pattern
Recognition, 3121-24.
.. [2] John. D. Kelleher, Brian Mac Namee, Aoife D'Arcy, (2015).
`Fundamentals of Machine Learning for Predictive Data Analytics:
Algorithms, Worked Examples, and Case Studies
<https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_.
Examples
--------
>>> from sklearn.metrics import balanced_accuracy_score
>>> y_true = [0, 1, 0, 0, 1, 0]
>>> y_pred = [0, 1, 0, 0, 0, 1]
>>> balanced_accuracy_score(y_true, y_pred)
0.625
"""
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
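# Illustrative sketch, not part of the original module: a hypothetical helper
# showing that the balanced accuracy equals the macro-averaged recall. Recall
# is 3/4 for class 0 and 1/2 for class 1, so both quantities are 0.625.
def _demo_balanced_accuracy_is_macro_recall():
    y_true = [0, 1, 0, 0, 1, 0]
    y_pred = [0, 1, 0, 0, 0, 1]
    return (balanced_accuracy_score(y_true, y_pred),
            recall_score(y_true, y_pred, average='macro'))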
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2, output_dict=False):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool (default = False)
If True, return output as dict
Returns
-------
report : string / dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
    The reported averages include macro average (averaging the unweighted
    mean per label), weighted average (averaging the support-weighted mean
    per label), and sample average (only for multilabel classification).
    Micro average (averaging the total true positives, false negatives and
    false positives) is only shown for multi-label or multi-class
    with a subset of classes, because it corresponds to accuracy otherwise.
    See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See also
--------
precision_recall_fscore_support, confusion_matrix,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
labels_given = True
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
# labelled micro average
micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and
(not labels_given or
(set(labels) == set(unique_labels(y_true, y_pred)))))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
else:
raise ValueError(
"Number of classes, {0}, does not match size of "
"target_names, {1}. Try specifying the labels "
"parameter".format(len(labels), len(target_names))
)
if target_names is None:
target_names = ['%s' % l for l in labels]
headers = ["precision", "recall", "f1-score", "support"]
# compute per-class results without averaging
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for label, scores in report_dict.items():
report_dict[label] = dict(zip(headers,
[i.item() for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
# compute all applicable averages
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
# compute averages with specified averaging method
avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
y_true, y_pred, labels=labels,
average=average, sample_weight=sample_weight)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(
zip(headers, [i.item() for i in avg]))
else:
if line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + \
' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \
' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '',
*avg[2:], width=width,
digits=digits)
else:
report += row_fmt.format(line_heading, *avg,
width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
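# Illustrative sketch, not part of the original module: a hypothetical helper
# showing how ``output_dict=True`` makes individual entries of the report
# programmatically accessible, e.g. the recall of class '2' (~0.67) and the
# overall accuracy (0.6) for this toy data.
def _demo_classification_report_dict():
    y_true = [0, 1, 2, 2, 2]
    y_pred = [0, 0, 2, 2, 1]
    report = classification_report(y_true, y_pred, output_dict=True)
    return report['2']['recall'], report['accuracy']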
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default='deprecated')
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
.. deprecated:: 0.21
This parameter ``labels`` is deprecated in version 0.21 and will
be removed in version 0.23. Hamming loss uses ``y_true.shape[1]``
for the number of labels when y_true is binary label indicators,
so it is unnecessary for the user to specify.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
.. versionadded:: 0.18
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function, when `normalize` parameter is set to
True.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upperbounded by the subset zero-one loss, when
`normalize` parameter is set to True. It is always between 0 and 1,
lower being better.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if labels is not None:
warnings.warn("The labels parameter is unused. It was"
" deprecated in version 0.21 and"
" will be removed in version 0.23",
DeprecationWarning)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * y_true.shape[1] * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
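# Illustrative sketch, not part of the original module: a hypothetical helper
# contrasting the Hamming loss with the subset zero-one loss on multilabel
# data. Three of the four individual labels are wrong (Hamming loss 0.75),
# while both samples are wrong as complete label sets (subset loss 1.0).
def _demo_hamming_vs_subset_zero_one():
    y_true = np.array([[0, 1], [1, 1]])
    y_pred = np.zeros((2, 2))
    subset_zero_one = np.mean(np.any(y_true != y_pred, axis=1))
    return hamming_loss(y_true, y_pred), subset_zero_one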
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
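# Illustrative sketch, not part of the original module: a hypothetical helper
# reproducing the binary log loss by hand from the formula in the docstring,
# -(yt*log(yp) + (1 - yt)*log(1 - yp)) averaged over samples (~0.164 here).
def _demo_binary_log_loss():
    y_true = [1, 0]
    y_prob = [0.9, 0.2]
    by_hand = -(np.log(0.9) + np.log(1 - 0.2)) / 2.
    return log_loss(y_true, y_prob), by_hand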
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(random_state=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision)
0.30...
In the multiclass case:
>>> import numpy as np
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC()
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels)
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
np.clip(losses, 0, None, out=losses)
return np.average(losses, weights=sample_weight)
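# Illustrative sketch, not part of the original module: a hypothetical helper
# reproducing the binary hinge loss by hand as mean(max(0, 1 - y * decision)),
# using decision values close to those in the docstring example (~0.30).
def _demo_binary_hinge_loss():
    y_true = np.array([-1, 1, 1])
    pred_decision = np.array([-2.18, 2.36, 0.09])
    by_hand = np.mean(np.clip(1 - y_true * pred_decision, 0, None))
    return hinge_loss(y_true, pred_decision), by_hand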
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1). The Brier loss is composed of refinement loss and
calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int or str, default=None
Label of the positive class.
Defaults to the greater label unless y_true is all 0 or all -1
in which case pos_label defaults to 1.
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob)
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0)
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, pos_label="ham")
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
check_consistent_length(y_true, y_prob, sample_weight)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Labels in y_true: %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
# if pos_label=None, when y_true is in {-1, 1} or {0, 1},
    # pos_label is set to 1 (consistent with precision_recall_curve/roc_curve),
# otherwise pos_label is set to the greater label
# (different from precision_recall_curve/roc_curve,
# the purpose is to keep backward compatibility).
if pos_label is None:
if (np.array_equal(labels, [0]) or
np.array_equal(labels, [-1])):
pos_label = 1
else:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
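# Illustrative sketch, not part of the original module: a hypothetical helper
# showing that the Brier score is just the mean squared gap between the
# predicted probability of the positive class and the 0/1 outcome (0.0375).
def _demo_brier_is_mean_squared_error():
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    by_hand = np.mean((y_true - y_prob) ** 2)
    return brier_score_loss(y_true, y_prob), by_hand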
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/tsa/arima_process.py | 26 | 30878 | '''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker
for AR numbers are close but not identical to yule_walker
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practice of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn,
burnin=0):
"""
Generate a random sample of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nsample : int
length of simulated time series
sigma : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations at the
beginning of the sample are dropped
Returns
-------
sample : array
sample of ARMA process given by ar, ma of length nsample
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing (signal.lfilter) vs.
    conventions in statistics for ARMA processes, the AR parameters should
have the opposite sign of what you might expect. See the examples below.
Examples
--------
    >>> import numpy as np
    >>> import statsmodels.api as sm
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> ar = np.r_[1, -arparams] # add zero-lag and negate
>>> ma = np.r_[1, maparams] # add zero-lag
>>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
#TODO: unify with ArmaProcess method
eta = sigma * distrvs(nsample+burnin)
return signal.lfilter(ma, ar, eta)[burnin:]
def arma_acovf(ar, ma, nobs=10):
'''theoretical autocovariance function of ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acovf
Returns
-------
acovf : array
autocovariance of ARMA process given by ar, ma
See Also
--------
arma_acf
acovf
Notes
-----
Tries to do some crude numerical speed improvements for cases
    with high persistence. However, this algorithm is slow if the process is
highly persistent and only a few autocovariances are desired.
'''
#increase length of impulse response for AR closer to 1
#maybe cheap/fast enough to always keep nobs for ir large
if np.abs(np.sum(ar)-1) > 0.9:
nobs_ir = max(1000, 2 * nobs) # no idea right now how large is needed
else:
nobs_ir = max(100, 2 * nobs) # no idea right now
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
    #better safe than sorry (?), I have no idea about the required precision
#only checked for AR(1)
while ir[-1] > 5*1e-5:
nobs_ir *= 10
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
#again no idea where the speed break points are:
if nobs_ir > 50000 and nobs < 1001:
acovf = np.array([np.dot(ir[:nobs-t], ir[t:nobs])
for t in range(nobs)])
else:
acovf = np.correlate(ir, ir, 'full')[len(ir)-1:]
return acovf[:nobs]
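# Illustrative sketch, not part of the original module: for an AR(1) process
# with unit innovation variance the autocovariances have the closed form
# phi**k / (1 - phi**2); the numerical arma_acovf should match this up to
# truncation error of the impulse response. Helper name is hypothetical.
def _demo_ar1_acovf(phi=0.5, nobs=5):
    numerical = arma_acovf([1., -phi], [1.], nobs=nobs)
    closed_form = phi ** np.arange(nobs) / (1. - phi ** 2)
    return numerical, closed_form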
def arma_acf(ar, ma, nobs=10):
'''theoretical autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acf
Returns
-------
acf : array
autocorrelation of ARMA process given by ar, ma
See Also
--------
arma_acovf
acf
acovf
'''
acovf = arma_acovf(ar, ma, nobs)
return acovf/acovf[0]
def arma_pacf(ar, ma, nobs=10):
'''partial autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned pacf
Returns
-------
pacf : array
partial autocorrelation of ARMA process given by ar, ma
Notes
-----
solves yule-walker equation for each lag order up to nobs lags
not tested/checked yet
'''
apacf = np.zeros(nobs)
acov = arma_acf(ar, ma, nobs=nobs+1)
apacf[0] = 1.
for k in range(2, nobs+1):
r = acov[:k]
apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
return apacf
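# Illustrative sketch, not part of the original module (the docstring above
# notes the function is not fully checked): for a pure AR(1) the partial
# autocorrelations should be 1 at lag 0, phi at lag 1 and numerically zero
# afterwards. Helper name is hypothetical.
def _demo_ar1_pacf(phi=0.5, nobs=5):
    return arma_pacf([1., -phi], [1.], nobs=nobs)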
def arma_periodogram(ar, ma, worN=None, whole=0):
'''periodogram for ARMA process given by lag-polynomials ar and ma
Parameters
----------
ar : array_like
autoregressive lag-polynomial with leading 1 and lhs sign
ma : array_like
moving average lag-polynomial with leading 1
worN : {None, int}, optional
option for scipy.signal.freqz (read "w or N")
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : {0,1}, optional
options for scipy.signal.freqz
        Normally, frequencies are computed from 0 to pi (the upper half of the
        unit circle). If whole is non-zero, compute frequencies from 0 to 2*pi.
Returns
-------
w : array
frequencies
sd : array
periodogram, spectral density
Notes
-----
Normalization ?
This uses signal.freqz, which does not use fft. There is a fft version
somewhere.
'''
w, h = signal.freqz(ma, ar, worN=worN, whole=whole)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
# this happens with unit root or seasonal unit root'
print('Warning: nan in frequency response h, maybe a unit root')
return w, sd
def arma_impulse_response(ar, ma, nobs=100):
'''get the impulse response function (MA representation) for ARMA process
Parameters
----------
ma : array_like, 1d
moving average lag polynomial
ar : array_like, 1d
auto regressive lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ir : array, 1d
impulse response function with nobs elements
Notes
-----
This is the same as finding the MA representation of an ARMA(p,q).
By reversing the role of ar and ma in the function arguments, the
returned result is the AR representation of an ARMA(p,q), i.e
ma_representation = arma_impulse_response(ar, ma, nobs=100)
ar_representation = arma_impulse_response(ma, ar, nobs=100)
fully tested against matlab
Examples
--------
AR(1)
>>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
this is the same as
>>> 0.8**np.arange(10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
MA(2)
>>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=10)
array([ 1. , 0.5, 0.2, 0. , 0. , 0. , 0. , 0. , 0. , 0. ])
ARMA(1,2)
>>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], nobs=10)
array([ 1. , 1.3 , 1.24 , 0.992 , 0.7936 ,
0.63488 , 0.507904 , 0.4063232 , 0.32505856, 0.26004685])
'''
impulse = np.zeros(nobs)
impulse[0] = 1.
return signal.lfilter(ma, ar, impulse)
#alias, easier to remember
arma2ma = arma_impulse_response
#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
'''get the AR representation of an ARMA process
Parameters
----------
ar : array_like, 1d
auto regressive lag polynomial
ma : array_like, 1d
moving average lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ar : array, 1d
coefficients of AR lag polynomial with nobs elements
Notes
-----
This is just an alias for
``ar_representation = arma_impulse_response(ma, ar, nobs=100)``
fully tested against matlab
Examples
--------
'''
return arma_impulse_response(ma, ar, nobs=nobs)
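# Illustrative sketch, not part of the original module: the AR(infinity)
# representation of an ARMA(1, 1) with ar = [1, -0.8] and ma = [1, 0.5] has
# coefficients 1, then -1.3 * (-0.5)**(j - 1), i.e. approximately
# [1., -1.3, 0.65, -0.325, 0.1625]. Helper name is hypothetical.
def _demo_arma2ar():
    return arma2ar([1., -0.8], [1., 0.5], nobs=5)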
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
'''find arma approximation to ar process
This finds the ARMA(p,q) coefficients that minimize the integrated
squared difference between the impulse_response functions
(MA representation) of the AR and the ARMA process. This does
currently not check whether the MA lagpolynomial of the ARMA
process is invertible, neither does it check the roots of the AR
lagpolynomial.
Parameters
----------
ar_des : array_like
coefficients of original AR lag polynomial, including lag zero
p, q : int
length of desired ARMA lag polynomials
n : int
        number of terms of the impulse_response function to include in the
objective function for the approximation
mse : string, 'ar'
not used yet,
Returns
-------
ar_app, ma_app : arrays
coefficients of the AR and MA lag polynomials of the approximation
res : tuple
result of optimize.leastsq
Notes
-----
Extension is possible if we want to match autocovariance instead
of impulse response function.
TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    roots outside the unit interval to ones that are inside. How do we do
this?
'''
#p,q = pq
def msear_err(arma, ar_des):
ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]]
ar_approx = arma_impulse_response(ma, ar, n)
## print(ar,ma)
## print(ar_des.shape, ar_approx.shape)
## print(ar_des)
## print(ar_approx)
return (ar_des - ar_approx) # ((ar - ar_approx)**2).sum()
if start is None:
arma0 = np.r_[-0.9 * np.ones(p-1), np.zeros(q-1)]
else:
arma0 = start
res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
#print(res)
arma_app = np.atleast_1d(res[0])
    ar_app = np.r_[1, arma_app[:p-1]]
ma_app = np.r_[1, arma_app[p-1:]]
return ar_app, ma_app, res
def lpol2index(ar):
'''remove zeros from lagpolynomial, squeezed representation with index
Parameters
----------
ar : array_like
coefficients of lag polynomial
Returns
-------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
'''
ar = np.asarray(ar)
index = np.nonzero(ar)[0]
coeffs = ar[index]
return coeffs, index
def index2lpol(coeffs, index):
'''expand coefficients to lag poly
Parameters
----------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
Returns
-------
ar : array_like
coefficients of lag polynomial
'''
n = max(index)
    ar = np.zeros(n + 1)
ar[index] = coeffs
return ar
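# Illustrative sketch, not part of the original module: lpol2index and
# index2lpol should round-trip a sparse lag polynomial (assuming the
# n + 1 length above). Helper name is hypothetical.
def _demo_lpol_index_roundtrip():
    ar = [1., 0., 0., -0.5]
    coeffs, index = lpol2index(ar)      # coeffs = [1., -0.5], index = [0, 3]
    return index2lpol(coeffs, index)    # recovers [1., 0., 0., -0.5]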
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
'''MA representation of fractional integration
.. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ma : array
coefficients of lag polynomial
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
return np.exp(gammaln(d+j) - gammaln(j+1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
'''AR representation of fractional integration
.. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ar : array
coefficients of lag polynomial
Notes:
first coefficient is 1, negative signs except for first term,
ar(L)*x_t
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
ar = - np.exp(gammaln(-d+j) - gammaln(j+1) - gammaln(-d))
ar[0] = 1
return ar
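# Illustrative sketch, not part of the original module: lpol_fiar(d) and
# lpol_fima(d) are truncated expansions of (1-L)**d and (1-L)**(-d), so
# convolving them reproduces the identity filter [1, 0, ..., 0] in the first
# n terms. Helper name is hypothetical.
def _demo_fractional_filters(d=0.4, n=10):
    ar = lpol_fiar(d, n=n)
    ma = lpol_fima(d, n=n)
    return signal.convolve(ar, ma)[:n]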
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
'''return coefficients for seasonal difference (1-L^s)
just a trivial convenience function
Parameters
----------
s : int
number of periods in season
Returns
-------
sdiff : list, length s+1
'''
return [1] + [0]*(s-1) + [-1]
def deconvolve(num, den, n=None):
"""Deconvolves divisor out of signal, division of polynomials for n terms
calculates den^{-1} * num
Parameters
----------
num : array_like
signal or lag polynomial
denom : array_like
coefficients of lag polynomial (linear filter)
n : None or int
number of terms of quotient
Returns
-------
quot : array
quotient or filtered series
rem : array
remainder
Notes
-----
If num is a time series, then this applies the linear filter den^{-1}.
If both num and den are both lagpolynomials, then this calculates the
quotient polynomial for n terms and also returns the remainder.
This is copied from scipy.signal.signaltools and added n as optional
parameter.
"""
num = np.atleast_1d(num)
den = np.atleast_1d(den)
N = len(num)
D = len(den)
if D > N and n is None:
quot = []
rem = num
else:
if n is None:
n = N-D+1
input = np.zeros(n, float)
input[0] = 1
quot = signal.lfilter(num, den, input)
num_approx = signal.convolve(den, quot, mode='full')
if len(num) < len(num_approx): # 1d only ?
num = np.concatenate((num, np.zeros(len(num_approx)-len(num))))
rem = num - num_approx
return quot, rem
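# Example (illustrative sketch): dividing the product of two lag polynomials by
# one factor recovers the other factor with (numerically) zero remainder.
# >>> lagpoly = np.convolve([1, -0.4], [1, 0.5])
# >>> quot, rem = deconvolve(lagpoly, [1, 0.5])
# >>> quot
# array([ 1. , -0.4])
# >>> np.allclose(rem, 0)
# True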
class ArmaProcess(object):
"""
Represent an ARMA process for given lag-polynomials
This is a class to bring together properties of the process.
It does not do any estimation or statistical analysis.
Parameters
----------
ar : array_like, 1d
Coefficient for autoregressive lag polynomial, including zero lag.
See the notes for some information about the sign.
ma : array_like, 1d
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample is
generated. See example.
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing (signal.lfilter) vs. the
    conventions in statistics for ARMA processes, the AR parameters should
    have the opposite sign of what you might expect. See the examples below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
    >>> import statsmodels.api as sm
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
>>> arma_process = sm.tsa.ArmaProcess(ar, ma)
>>> arma_process.isstationary
True
>>> arma_process.isinvertible
True
>>> y = arma_process.generate_sample(250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
# maybe needs special handling for unit roots
def __init__(self, ar, ma, nobs=100):
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.arcoefs = -self.ar[1:]
self.macoefs = self.ma[1:]
self.arpoly = np.polynomial.Polynomial(self.ar)
self.mapoly = np.polynomial.Polynomial(self.ma)
self.nobs = nobs
@classmethod
def from_coeffs(cls, arcoefs, macoefs, nobs=100):
"""
Create ArmaProcess instance from coefficients of the lag-polynomials
Parameters
----------
arcoefs : array-like
Coefficient for autoregressive lag polynomial, not including zero
lag. The sign is inverted to conform to the usual time series
representation of an ARMA process in statistics. See the class
docstring for more information.
macoefs : array-like
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample
is generated.
"""
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
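    # Example (illustrative sketch): from_coeffs takes the coefficients without
    # the zero lag and handles the sign convention internally.
    # >>> ArmaProcess.from_coeffs([.75, -.25], [.65, .35]).ar
    # array([ 1.  , -0.75,  0.25])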
@classmethod
def from_estimation(cls, model_results, nobs=None):
"""
Create ArmaProcess instance from ARMA estimation results
Parameters
----------
model_results : ARMAResults instance
A fitted model
nobs : int, optional
If None, nobs is taken from the results
"""
arcoefs = model_results.arparams
macoefs = model_results.maparams
nobs = nobs or model_results.nobs
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
def __mul__(self, oth):
if isinstance(oth, self.__class__):
ar = (self.arpoly * oth.arpoly).coef
ma = (self.mapoly * oth.mapoly).coef
else:
try:
aroth, maoth = oth
arpolyoth = np.polynomial.Polynomial(aroth)
mapolyoth = np.polynomial.Polynomial(maoth)
ar = (self.arpoly * arpolyoth).coef
ma = (self.mapoly * mapolyoth).coef
except:
print('other is not a valid type')
raise
return self.__class__(ar, ma, nobs=self.nobs)
def __repr__(self):
return 'ArmaProcess(%r, %r, nobs=%d)' % (self.ar.tolist(),
self.ma.tolist(),
self.nobs)
def __str__(self):
return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(),
self.ma.tolist())
def acovf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acovf(self.ar, self.ma, nobs=nobs)
acovf.__doc__ = arma_acovf.__doc__
def acf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acf(self.ar, self.ma, nobs=nobs)
acf.__doc__ = arma_acf.__doc__
def pacf(self, nobs=None):
nobs = nobs or self.nobs
return arma_pacf(self.ar, self.ma, nobs=nobs)
pacf.__doc__ = arma_pacf.__doc__
def periodogram(self, nobs=None):
nobs = nobs or self.nobs
return arma_periodogram(self.ar, self.ma, worN=nobs)
periodogram.__doc__ = arma_periodogram.__doc__
def impulse_response(self, nobs=None):
nobs = nobs or self.nobs
return arma_impulse_response(self.ar, self.ma, worN=nobs)
impulse_response.__doc__ = arma_impulse_response.__doc__
def arma2ma(self, nobs=None):
nobs = nobs or self.nobs
return arma2ma(self.ar, self.ma, nobs=nobs)
arma2ma.__doc__ = arma2ma.__doc__
def arma2ar(self, nobs=None):
nobs = nobs or self.nobs
return arma2ar(self.ar, self.ma, nobs=nobs)
arma2ar.__doc__ = arma2ar.__doc__
@property
def arroots(self):
"""
Roots of autoregressive lag-polynomial
"""
return self.arpoly.roots()
@property
def maroots(self):
"""
Roots of moving average lag-polynomial
"""
return self.mapoly.roots()
@property
def isstationary(self):
'''Arma process is stationary if AR roots are outside unit circle
Returns
-------
isstationary : boolean
True if autoregressive roots are outside unit circle
'''
        return bool(np.all(np.abs(self.arroots) > 1))
@property
def isinvertible(self):
'''Arma process is invertible if MA roots are outside unit circle
Returns
-------
isinvertible : boolean
True if moving average roots are outside unit circle
'''
        return bool(np.all(np.abs(self.maroots) > 1))
def invertroots(self, retnew=False):
'''make MA polynomial invertible by inverting roots inside unit circle
Parameters
----------
retnew : boolean
If False (default), then return the lag-polynomial as array.
If True, then return a new instance with invertible MA-polynomial
Returns
-------
manew : array
new invertible MA lag-polynomial, returned if retnew is false.
wasinvertible : boolean
True if the MA lag-polynomial was already invertible, returned if
retnew is false.
armaprocess : new instance of class
If retnew is true, then return a new instance with invertible
MA-polynomial
'''
#TODO: variable returns like this?
        pr = self.maroots
insideroots = np.abs(pr) < 1
if insideroots.any():
pr[np.abs(pr) < 1] = 1./pr[np.abs(pr) < 1]
pnew = np.polynomial.Polynomial.fromroots(pr)
mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = self.ma
wasinvertible = True
if retnew:
return self.__class__(self.ar, mainv, nobs=self.nobs)
else:
return mainv, wasinvertible
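    # Example (illustrative sketch): an MA root at L = -0.5 (inside the unit
    # circle) is mirrored to L = -2, giving an invertible polynomial.
    # >>> ArmaProcess([1.], [1., 2.]).invertroots(retnew=False)
    # (array([ 1. ,  0.5]), False)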
def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
burnin=0):
'''generate ARMA samples
Parameters
----------
nsample : int or tuple of ints
            If nsample is an integer, then this creates a 1d timeseries of
            length nsample. If nsample is a tuple, then the timeseries is along
            `axis`. All other axes have independent arma samples.
scale : float
standard deviation of noise
        distrvs : function, random number generator
            function that generates the random numbers; it is called with a
            ``size`` keyword argument
            default: np.random.normal
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations
at the beginning of the sample are dropped
axis : int
See nsample.
Returns
-------
rvs : ndarray
random sample(s) of arma process
Notes
-----
Should work for n-dimensional with time series along axis, but not
tested yet. Processes are sampled independently.
'''
if distrvs is None:
distrvs = np.random.normal
if np.ndim(nsample) == 0:
nsample = [nsample]
if burnin:
            #handle burn-in time for nd arrays
#maybe there is a better trick in scipy.fft code
newsize = list(nsample)
newsize[axis] += burnin
newsize = tuple(newsize)
fslice = [slice(None)]*len(newsize)
fslice[axis] = slice(burnin, None, None)
fslice = tuple(fslice)
else:
newsize = tuple(nsample)
fslice = tuple([slice(None)]*np.ndim(newsize))
eta = scale * distrvs(size=newsize)
return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
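    # Example (illustrative sketch): burn-in observations are generated and then
    # dropped, so the returned length still equals nsample.
    # >>> ap = ArmaProcess([1, -0.75, 0.25], [1, 0.65, 0.35])
    # >>> ap.generate_sample(nsample=250, burnin=50).shape
    # (250,)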
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
if __name__ == '__main__':
    # NOTE: the demo below relies on the legacy sandbox ARIMA / ARIMAProcess
    # estimators and may not run as-is against current statsmodels.
# Simulate AR(1)
#--------------
# ar * y = ma * eta
ar = [1, -0.8]
ma = [1.0]
# generate AR data
eta = 0.1 * np.random.randn(1000)
yar1 = signal.lfilter(ar, ma, eta)
print("\nExample 0")
arest = ARIMAProcess(yar1)
rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat)
print(cov_x)
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
arest = ARIMAProcess(y1)
rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat1)
print(cov_x1)
err1 = arest.errfn(x=y1)
print(np.var(err1))
import statsmodels.api as sm
print(sm.regression.yule_walker(y1, order=2, inv=True))
print("\nExample 2")
nsample = 1000
ar = [1.0, -0.6, -0.1]
ma = [1.0, 0.3, 0.2]
y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
arest2 = ARIMAProcess(y2)
rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
print(rhohat2)
print(cov_x2)
err2 = arest.errfn(x=y2)
print(np.var(err2))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
print(rhohat2a)
print(cov_x2a)
err2a = arest.errfn(x=y2)
print(np.var(err2a))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y2, order=2, inv=True))
print("\nExample 20")
nsample = 1000
ar = [1.0]#, -0.8, -0.4]
ma = [1.0, 0.5, 0.2]
y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
arest20 = ARIMAProcess(y3)
rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
print(rhohat3)
print(cov_x3)
err3 = arest20.errfn(x=y3)
print(np.var(err3))
print(np.sqrt(np.dot(err3,err3)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
print(rhohat3a)
print(cov_x3a)
err3a = arest20.errfn(x=y3)
print(np.var(err3a))
print(np.sqrt(np.dot(err3a,err3a)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y3, order=2, inv=True))
print("\nExample 02")
nsample = 1000
ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
ma = [1.0]#, 0.8, 0.4]
y4 = ARIMA.generate_sample(ar,ma,nsample)
arest02 = ARIMAProcess(y4)
rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
print(rhohat4)
print(cov_x4)
err4 = arest02.errfn(x=y4)
print(np.var(err4))
sige = np.sqrt(np.dot(err4,err4)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4)))
print(np.sqrt(np.diag(cov_x4)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
print(rhohat4a)
print(cov_x4a)
err4a = arest02.errfn(x=y4)
print(np.var(err4a))
sige = np.sqrt(np.dot(err4a,err4a)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4a)))
print(np.sqrt(np.diag(cov_x4a)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
import statsmodels.api as sm
print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))
import matplotlib.pyplot as plt
plt.plot(arest2.forecast()[-100:])
#plt.show()
ar1, ar2 = ([1, -0.4], [1, 0.5])
ar2 = [1, -1]
lagpolyproduct = np.convolve(ar1, ar2)
print(deconvolve(lagpolyproduct, ar2, n=None))
print(signal.deconvolve(lagpolyproduct, ar2))
print(deconvolve(lagpolyproduct, ar2, n=10))
| bsd-3-clause |
jakereimer/pipeline | python/pipeline/utils/mask_classification.py | 5 | 11400 | """ Mask classification functions. """
import numpy as np
def classify_manual(masks, template):
""" Opens a GUI that lets you manually classify masks into any of the valid types.
:param np.array masks: 3-d array of masks (num_masks, image_height, image_width)
:param np.array template: Image used as background to help with mask classification.
"""
import matplotlib.pyplot as plt
import seaborn as sns
mask_types= []
plt.ioff()
for mask in masks:
ir = mask.sum(axis=1) > 0
ic = mask.sum(axis=0) > 0
il, jl = [max(np.min(np.where(i)[0]) - 10, 0) for i in [ir, ic]]
ih, jh = [min(np.max(np.where(i)[0]) + 10, len(i)) for i in [ir, ic]]
tmp_mask = np.array(mask[il:ih, jl:jh])
with sns.axes_style('white'):
fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(10, 3))
ax[0].imshow(template[il:ih, jl:jh], cmap=plt.cm.get_cmap('gray'))
ax[1].imshow(template[il:ih, jl:jh], cmap=plt.cm.get_cmap('gray'))
tmp_mask[tmp_mask == 0] = np.NaN
ax[1].matshow(tmp_mask, cmap=plt.cm.get_cmap('viridis'), alpha=0.5, zorder=10)
ax[2].matshow(tmp_mask, cmap=plt.cm.get_cmap('viridis'))
for a in ax:
a.set_aspect(1)
a.axis('off')
fig.tight_layout()
fig.canvas.manager.window.wm_geometry("+250+250")
fig.suptitle('S(o)ma, A(x)on, (D)endrite, (N)europil, (A)rtifact or (U)nknown?')
def on_button(event):
if event.key == 'o':
mask_types.append('soma')
plt.close(fig)
elif event.key == 'x':
mask_types.append('axon')
plt.close(fig)
elif event.key == 'd':
mask_types.append('dendrite')
plt.close(fig)
elif event.key == 'n':
mask_types.append('neuropil')
plt.close(fig)
elif event.key == 'a':
mask_types.append('artifact')
plt.close(fig)
elif event.key == 'u':
mask_types.append('unknown')
plt.close(fig)
fig.canvas.mpl_connect('key_press_event', on_button)
plt.show()
sns.reset_orig()
return mask_types
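# Example (illustrative sketch; the arrays below are dummy stand-ins for real
# segmentation output, only meant to show the expected shapes):
# >>> masks = (np.random.rand(3, 128, 128) > 0.99).astype(float)
# >>> template = np.random.rand(128, 128)
# >>> labels = classify_manual(masks, template)   # opens an interactive window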
def classify_manual_extended(masks,template1,template2,template3,template4,template5,traces1,traces2,movie,threshold=80,window=3):
""" Opens a GUI that lets you manually classify masks into any of the valid types.
:param np.array masks: 3-d array of masks (num_masks, image_height, image_width)
:param np.array template1: Image used as background to help with mask classification.
:param np.array template2: Image used as background to help with mask classification.
:param np.array template3: Image used as background to help with mask classification.
:param np.array template4: Image used as background to help with mask classification.
:param np.array template5: Series of 7 images used as background to help with mask classification.
:param np.array traces1: 2-d array of mask activity plotted and used to highlight high activity frames (num_masks,num_frames)
:param np.array traces2: 2-d array of mask activity, plotted (num_masks,num_frames)
:param np.array movie: 3-d array of motion corrected imaging frames (image_height, image_width, num_frames)
:param float threshold: percentile between 0 and 100 used to plot inner versus outer mask
:param int window: odd number indicating width of window used in median filter of trace 1 searching for high activity frames
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
from scipy import signal
mask_types= []
plt.ioff()
for mask, trace1, trace2 in zip(masks, traces1, traces2):
with sns.axes_style('white'):
            fig, axes = plt.subplots(4, 7, figsize=(30, 20))
ir = mask.sum(axis=1) > 0
            ic = mask.sum(axis=0) > 0
il, jl = [max(np.min(np.where(i)[0]) - 10, 0) for i in [ir, ic]]
ih, jh = [min(np.max(np.where(i)[0]) + 10, len(i)) for i in [ir, ic]]
plot_mask = np.array(mask[il:ih, jl:jh])
for ax,template in zip(axes[0][:6], [plot_mask, template1, template2-template1,
template1, template4, template3, template3*template1]):
ax.matshow(template[il:ih, jl:jh], cmap=cm.get_cmap('gray'))
                ax.contour(plot_mask, [np.percentile(mask[mask > 0], threshold)], linewidths=0.8, colors='w')
ax.contour(plot_mask, [0.01], linewidths=0.8, colors='w')
ax.set_aspect(1)
ax.axis('off')
for ax,template in zip(axes[1], template5):
ax.matshow(template[il:ih, jl:jh], cmap=cm.get_cmap('gray'))
                ax.contour(plot_mask, [np.percentile(mask[mask > 0], threshold)], linewidths=0.8, colors='w')
ax.contour(plot_mask, [0.01], linewidths=0.8, colors='w')
ax.set_aspect(1)
ax.axis('off')
filt_trace = signal.medfilt(trace1, window)
idx = detect_peaks(filt_trace, mpd=len(trace1)/window)
            # keep the (up to) seven highest-activity peaks, in chronological order
            centers = np.sort(idx[np.argsort(filt_trace[idx])[::-1][:7]])
            for ax, center in zip(axes[3], centers):
                low, high = int(center - window / 2), int(center + window / 2 + .5)
                # the docstring declares movie as (image_height, image_width, num_frames)
                frame = movie[:, :, low:high].max(axis=-1)
ax.matshow(frame[il:ih, jl:jh], cmap=cm.get_cmap('gray'))
                ax.contour(plot_mask, [np.percentile(mask[mask > 0], threshold)], linewidths=0.8, colors='w')
ax.contour(plot_mask, [0.01], linewidths=0.8, colors='w')
ax.set_aspect(1)
ax.axis('off')
trace1_ax = plt.subplot(8, 1, 5)
trace1_ax.plot(trace1)
trace1_ax.plot(centers, trace1[[int(center) for center in centers]], 'or')
trace2_ax = plt.subplot(8, 1, 6)
trace2_ax.plot(trace2)
fig.tight_layout()
fig.canvas.manager.window.wm_geometry("+250+250")
fig.suptitle('S(o)ma, A(x)on, (D)endrite, (N)europil, (A)rtifact or (U)nknown?')
def on_button(event):
if event.key == 'o':
mask_types.append('soma')
plt.close(fig)
elif event.key == 'x':
mask_types.append('axon')
plt.close(fig)
elif event.key == 'd':
mask_types.append('dendrite')
plt.close(fig)
elif event.key == 'n':
mask_types.append('neuropil')
plt.close(fig)
elif event.key == 'a':
mask_types.append('artifact')
plt.close(fig)
elif event.key == 'u':
mask_types.append('unknown')
plt.close(fig)
fig.canvas.mpl_connect('key_press_event', on_button)
plt.show()
sns.reset_orig()
return mask_types
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
    ind : 1D array_like
        indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
__author__ = "Marcos Duarte, https://github.com/demotu/BMC"
__version__ = "1.0.4"
__license__ = "MIT"
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
        # `_plot` is the plotting helper from the original detect_peaks module
        # (github.com/demotu/BMC); it is not defined in this file.
        _plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
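# Example (illustrative sketch): two peaks of a sine wave sampled on a 200-point
# grid; with a minimum peak distance the detected indices land near the true
# maxima (about 25 and 124 on this grid).
# >>> x = np.sin(np.linspace(0, 4 * np.pi, 200))
# >>> detect_peaks(x, mpd=20)
# array([ 25, 124])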
| lgpl-3.0 |
winklerand/pandas | pandas/tests/io/parser/test_textreader.py | 1 | 12995 | # -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
import pytest
from pandas.compat import StringIO, BytesIO, map
from pandas import compat
import os
import sys
from numpy import nan
import numpy as np
from pandas import DataFrame
from pandas.io.parsers import (read_csv, TextFileReader)
from pandas.util.testing import assert_frame_equal
import pandas.util.testing as tm
from pandas._libs.parsers import TextReader
import pandas._libs.parsers as parser
class TestTextReader(object):
def setup_method(self, method):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f)
result = reader.read() # noqa
finally:
f.close()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f, memory_map=True, header=None)
reader.read()
finally:
f.close()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
@tm.capture_stderr
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
pytest.raises(parser.ParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object),
1: np.array(['b', 'e', 'h', 'm'], dtype=object),
2: np.array(['c', 'f', 'i', 'n'], dtype=object)}
assert_array_dicts_equal(result, expected)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
assert 'Skipping line 4' in val
assert 'Skipping line 6' in val
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
assert header == expected
recs = reader.read()
expected = {0: np.array([1, 4], dtype=np.int64),
1: np.array([2, 5], dtype=np.int64),
2: np.array([3, 6], dtype=np.int64)}
assert_array_dicts_equal(recs, expected)
# not enough rows
pytest.raises(parser.ParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_header_not_enough_lines_as_recarray(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',',
header=2, as_recarray=True)
header = reader.header
expected = [['a', 'b', 'c']]
assert header == expected
recs = reader.read()
expected = {'a': np.array([1, 4], dtype=np.int64),
'b': np.array([2, 5], dtype=np.int64),
'c': np.array([3, 6], dtype=np.int64)}
assert_array_dicts_equal(expected, recs)
# not enough rows
pytest.raises(parser.ParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
assert result[0].dtype == 'S5'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
assert (result[0] == ex_values).all()
assert result[1].dtype == 'i4'
reader = _make_reader(dtype='S4')
result = reader.read()
assert result[0].dtype == 'S4'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
assert (result[0] == ex_values).all()
assert result[1].dtype == 'S4'
def test_numpy_string_dtype_as_recarray(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
assert result['0'].dtype == 'S4'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
assert (result['0'] == ex_values).all()
assert result['1'].dtype == 'S4'
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'S1'
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'O'
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'O'
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
assert len(result) == 2
assert (result[1] == exp[1]).all()
assert (result[2] == exp[2]).all()
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4], dtype=np.int64),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n' * 2 + '\n' * 2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def test_empty_csv_input(self):
# GH14867
df = read_csv(StringIO(), chunksize=20, header=None,
names=['a', 'b', 'c'])
assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
        # tm.assert_numpy_array_equal raises on mismatch, so no outer assert is needed
        tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))
| bsd-3-clause |