repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
numenta/nupic.critic | gen1/nupic_output.py | 3 | 6058 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import os
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
from nupic.algorithms import anomaly_likelihood
# Try to import matplotlib, but it's optional.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
DEFAULT_OUTPUT_PATH = "."
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, name, path=DEFAULT_OUTPUT_PATH):
self.name = name
self.path = path
@abstractmethod
def write(self, row, result):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFile = None
self.outputWriter = None
self.lineCount = 0
outputFilePath = os.path.join(self.path, "%s.csv" % self.name)
print "Preparing to output %s data to %s" % (self.name, outputFilePath)
self.outputFile = open(outputFilePath, "w")
self.outputWriter = csv.writer(self.outputFile)
self._headerWritten = False
self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood()
def write(self, row, result):
row["anomalyScore"] = result.inferences["anomalyScore"]
if not self._headerWritten:
keys = row.keys()
keys.append("predicted")
keys.append("anomalyLikelihood")
self.outputWriter.writerow(keys)
self._headerWritten = True
predicted = result.inferences["multiStepBestPredictions"][1]
value = row["b3"]
anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
value, row["anomalyScore"], row["seconds"]
)
rows = row.values()
rows.append(predicted)
rows.append(anomalyLikelihood)
self.outputWriter.writerow(rows)
self.lineCount += 1
def close(self):
self.outputFile.close()
print "Done. Wrote %i data lines to %s." % (self.lineCount, self.outputFile)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
self.names = [self.name]
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('Frequency Bucket')
plt.xlabel('Seconds')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
# print self.dates[index]
# self.convertedDates.append(deque(
# [date2num(date) for date in self.dates[index]], maxlen=WINDOW
# ))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
# self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.dates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.dates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
| agpl-3.0 |
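The `NuPICFileOutput` class above expects an ordered row dict (it reads `seconds` and `b3` and builds the CSV header from the row's keys) plus a NuPIC `ModelResult`-like object carrying `inferences`. Below is a minimal, hypothetical usage sketch; it assumes the file is importable as `nupic_output`, that the `nupic` package is installed (the module imports `anomaly_likelihood` at import time), and that it is run under Python 2, which the module targets. The `FakeResult` stand-in and the demo values are illustrative, not part of the tutorial.

```python
# Hypothetical driver for NuPICFileOutput (requires the nupic package, Python 2).
from collections import OrderedDict

from nupic_output import NuPICFileOutput


class FakeResult(object):
    """Stand-in exposing only the fields that write() reads above."""
    def __init__(self, anomaly_score, prediction):
        self.inferences = {
            "anomalyScore": anomaly_score,
            "multiStepBestPredictions": {1: prediction},
        }


output = NuPICFileOutput("demo", path=".")
for i in range(10):
    # The row keys become the CSV header on the first write, so keep them ordered.
    row = OrderedDict([("seconds", float(i)), ("b3", 0.5 + 0.01 * i)])
    output.write(row, FakeResult(anomaly_score=0.1, prediction=0.5))
output.close()  # prints the line count and closes demo.csv
```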
joshbohde/scikit-learn | sklearn/preprocessing/tests/test_preprocessing.py | 2 | 12134 | import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import Scaler
from sklearn.preprocessing import scale
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
np.random.seed(0)
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler():
"""Test scaling of dataset along all axis"""
# First test with 1D data
X = np.random.randn(5)
X_orig_copy = X.copy()
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# Test with 2D data
X = np.random.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that a copy of X has been made
assert X_scaled is not X
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert X_scaled is not X
X_scaled = scaler.fit(X).transform(X, copy=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is X
X = np.random.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that a copy of X has been made
assert X_scaled is not X
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = Scaler(with_mean=False)
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that a copy of X has been made
assert X_scaled is not X
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, with_mean=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that a copy of X has been made
assert X_scaled is not X
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert X_norm is X
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sp.csr_matrix)
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(np.abs(X_norm[i]).sum(), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sp.csr_matrix)
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, 0]])
for init in (np.array, sp.csr_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert X_bin is not X
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert X_bin is not X
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
assert X_bin is X
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
lb = LabelBinarizer()
# two-class case
inp = np.array([0, 1, 1, 0])
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
inp = [(2, 3), (1,), (1, 2)]
expected = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k])
for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
def test_center_kernel():
"""Test that KernelCenterer is equivalent to Scaler in feature space"""
X_fit = np.random.random((5, 4))
scaler = Scaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = np.random.random((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
X = np.random.random((5, 4))
for obj in ((Scaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
| bsd-3-clause |
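The tests above keep asserting two invariants: `scale`/`Scaler` map each column to zero mean and unit standard deviation (constant columns stay at zero), and `normalize`/`Normalizer` rescale each row to unit norm while leaving all-zero rows untouched. A short, self-contained illustration of those invariants using the function forms exercised in the tests, assuming NumPy and scikit-learn are installed:

```python
import numpy as np
from sklearn.preprocessing import normalize, scale

rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0                      # constant (zero) column, as in test_scaler

X_scaled = scale(X)
print(X_scaled.mean(axis=0))       # ~0 for every column
print(X_scaled.std(axis=0))        # 0 for the constant column, 1 elsewhere

X[3, :] = 0.0                      # all-zero row, as in the normalizer tests
X_l1 = normalize(X, norm='l1')
print(np.abs(X_l1).sum(axis=1))    # 1 for the nonzero rows, 0 for the zero row
```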
robmcmullen/peppy | peppy/hsi/hsi_plugin.py | 1 | 4164 | # peppy Copyright (c) 2006-2010 Rob McMullen
# Licensed under the GPLv2; see http://peppy.flipturn.org for more info
import peppy.vfs as vfs
from peppy.yapsy.plugins import *
from peppy.debug import *
from peppy.hsi.loader import *
from peppy.hsi.hsi_stc import *
from peppy.about import AddCopyright
AddCopyright("Matplotlib", "http://matplotlib.sourceforge.org/", "John D. Hunter", "2003-2008", "Colormaps from")
class HSIPlugin(IPeppyPlugin):
"""HSI viewer plugin to register modes and user interface.
"""
def getHSIModeClass(self):
"""Attempt to import the HSI mode and initialize any needed resources
"""
try:
import peppy.hsi.hsi_major_mode
mode = peppy.hsi.hsi_major_mode.HSIMode
except Exception, e:
dprint("FAILED Loading hsi_major_mode")
import traceback
error = traceback.format_exc()
dprint(error)
raise
import peppy.hsi.common
peppy.hsi.common.scipy_module()
return mode
def attemptOpen(self, buffer, url):
assert self.dprint("Trying to open url: %s" % repr(unicode(url)))
mode = self.getHSIModeClass()
format = HyperspectralFileFormat.identify(url)
if format:
assert self.dprint("found %s" % repr(format))
return (None, [peppy.hsi.hsi_major_mode.HSIMode])
return (None, [])
def getCompatibleMajorModes(self, stc_class):
if hasattr(stc_class, 'getCube'):
try:
yield self.getHSIModeClass()
except:
dprint("FAILED Loading hsi_major_mode")
import traceback
error = traceback.format_exc()
dprint(error)
pass
raise StopIteration
def getCompatibleMinorModes(self, cls):
if cls.keyword == "HSI":
try:
import peppy.hsi.plotters
for mode in [peppy.hsi.plotters.HSIXProfileMinorMode,
peppy.hsi.plotters.HSIYProfileMinorMode,
peppy.hsi.plotters.HSISpectrumMinorMode]:
yield mode
except:
pass
raise StopIteration
def getCompatibleActions(self, modecls):
assert self.dprint("Checking for HSI mode %s" % modecls)
if modecls.keyword == "HSI":
try:
import peppy.hsi.hsi_menu
import peppy.hsi.filter_menu
return [peppy.hsi.hsi_menu.PrevBand,
peppy.hsi.hsi_menu.NextBand,
peppy.hsi.hsi_menu.SelectBand,
peppy.hsi.hsi_menu.GotoBand,
peppy.hsi.hsi_menu.BandSlider,
peppy.hsi.hsi_menu.BandSliderUpdates,
peppy.hsi.filter_menu.ColormapAction,
peppy.hsi.filter_menu.ContrastFilterAction,
peppy.hsi.filter_menu.MedianFilterAction,
peppy.hsi.filter_menu.GaussianFilterAction,
peppy.hsi.filter_menu.ClippingFilterAction,
peppy.hsi.filter_menu.SubtractBandAction,
peppy.hsi.hsi_menu.SwapEndianAction,
peppy.hsi.hsi_menu.CubeViewAction,
peppy.hsi.hsi_menu.ShowPixelValues,
peppy.hsi.hsi_menu.TestSubset,
peppy.hsi.hsi_menu.SpatialSubset,
peppy.hsi.hsi_menu.FocalPlaneAverage,
peppy.hsi.hsi_menu.ScaleImageDimensions,
peppy.hsi.hsi_menu.ReduceImageDimensions,
peppy.hsi.hsi_menu.ExportAsENVI,
peppy.hsi.hsi_menu.ExportAsENVIBigEndian,
peppy.hsi.hsi_menu.ExportAsENVILittleEndian,
peppy.hsi.hsi_menu.ExportAsImage,
]
except Exception, e:
dprint(e)
pass
return []
| gpl-2.0 |
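The plugin above keeps its heavy imports (scipy, the HSI major mode, the plotters) inside the hook methods, so plugin discovery stays cheap and a missing dependency only disables the HSI features instead of breaking the host application. A generic, self-contained sketch of that lazy-import plus capability-check pattern follows; the class and method names here are illustrative and are not part of the peppy/yapsy API:

```python
class LazyHandlerPlugin(object):
    """Illustrative plugin: defer expensive imports to the lookup hooks."""

    def get_handler_class(self):
        # Importing inside the hook mirrors getHSIModeClass() above: the cost
        # (and the possible ImportError) is paid only when the hook is used.
        import csv  # stand-in for a heavy optional dependency
        return csv.DictReader

    def compatible_handlers(self, target):
        # Yield handler classes only if the target has the capability,
        # mirroring the hasattr(stc_class, 'getCube') check above.
        if hasattr(target, "read"):
            try:
                yield self.get_handler_class()
            except ImportError:
                return  # degrade gracefully, like the except blocks above


if __name__ == "__main__":
    import io
    plugin = LazyHandlerPlugin()
    print(list(plugin.compatible_handlers(io.StringIO(u"a,b\n1,2\n"))))
```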
CforED/Machine-Learning | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
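Newer SciPy releases no longer ship `scipy.misc.lena()`, so the example above will not run as-is on a current stack. The recipe itself is unchanged: build a pixel-grid connectivity with `grid_to_graph`, then fit `AgglomerativeClustering` with `linkage='ward'`. A minimal sketch on a small synthetic image, assuming NumPy and scikit-learn are installed:

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.feature_extraction.image import grid_to_graph

# Synthetic 32x32 "image": two bright blobs on a noisy background.
rng = np.random.RandomState(0)
img = rng.rand(32, 32)
img[4:12, 4:12] += 2.0
img[18:28, 20:30] += 3.0

X = img.reshape(-1, 1)                    # one sample per pixel
connectivity = grid_to_graph(*img.shape)  # links each pixel to its grid neighbors
ward = AgglomerativeClustering(n_clusters=4, linkage='ward',
                               connectivity=connectivity).fit(X)
label = ward.labels_.reshape(img.shape)
print("regions:", np.unique(label))       # 4 spatially contiguous regions
```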
zuku1985/scikit-learn | sklearn/tree/tree.py | 8 | 44488 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity,
# which [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the integer %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the float %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
.. versionadded:: 0.18
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix in which non-zero elements
indicate that the sample passes through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'tree_')
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels) as integers or strings.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeClassifier, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeRegressor, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
| bsd-3-clause |
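The `apply` and `decision_path` methods documented in `BaseDecisionTree` above are easiest to understand by running them on a tiny fitted tree. A short sketch, assuming scikit-learn (0.18 or later, when `decision_path` was added) is installed:

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

clf = DecisionTreeClassifier(random_state=0).fit(X, y)

print(clf.apply(X))               # leaf index each sample ends up in
indicator = clf.decision_path(X)  # CSR matrix of shape (n_samples, n_nodes)
print(indicator.toarray())        # 1 where a sample's path visits a node
print(clf.predict_proba(X))       # class fractions in each sample's leaf
```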
manipopopo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 39 | 20233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
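# Illustrative sketch (comments only) of the helpers above: they build a toy
# blob dataset, e.g. 3 random centers in 2-D and 100 points scattered around
# them, plus the true assignment of each point and its squared offset from
# its center:
#
#     centers = make_random_centers(3, 2)
#     points, assignments, sq_offsets = make_random_points(centers, 100)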
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
      # We don't test the use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
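    # The expected transform below is the squared Euclidean distance from
    # each point x to each center c, expanded as
    # ||x - c||^2 = ||x||^2 - 2 * x . c + ||c||^2
    # (clipped at 0 for numerical safety), which is what the next expression
    # computes.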
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
TheWylieStCoyote/gnuradio | gr-fec/python/fec/polar/testbed.py | 3 | 11661 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from .encoder import PolarEncoder
from .decoder import PolarDecoder
from . import channel_construction as cc
from .helper_functions import *
import matplotlib.pyplot as plt
def get_frozen_bit_position():
# frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 16, 17, 18, 20, 24), dtype=int)
# frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
m = 256
n_frozen = m // 2
frozenbitposition = cc.get_frozen_bit_indices_from_z_parameters(cc.bhattacharyya_bounds(0.0, m), n_frozen)
print(frozenbitposition)
return frozenbitposition
def test_enc_dec_chain():
ntests = 100
n = 256
k = n // 2
frozenbits = np.zeros(n - k)
frozenbitposition = get_frozen_bit_position()
for i in range(ntests):
bits = np.random.randint(2, size=k)
encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
encoded = encoder.encode(bits)
rx = decoder.decode(encoded)
if not is_equal(bits, rx):
raise ValueError('Test #', i, 'failed, input and output differ', bits, '!=', rx)
return
def is_equal(first, second):
if not (first == second).all():
result = first == second
for i in range(len(result)):
print('{0:4}: {1:2} == {2:1} = {3}'.format(i, first[i], second[i], result[i]))
return False
return True
def exact_value(la, lb):
    return np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb)))
def approx_value(la, lb):
return np.sign(la) * np.sign(lb) * np.minimum(np.abs(la), np.abs(lb))
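# Note: approx_value is the usual min-sum approximation of the exact LLR
# box-plus operation in exact_value. For example (values chosen arbitrarily
# for illustration), with la = 2.0 and lb = -3.0 the approximation gives
# sign(la) * sign(lb) * min(|la|, |lb|) = -2.0, while exact_value returns a
# slightly smaller magnitude (about -1.69) because of the log/exp correction.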
def path_metric_exact(last_pm, llr, ui):
return last_pm + np.log(1 + np.exp(-1. * llr * (1 - 2 * ui)))
def path_metric_approx(last_pm, llr, ui):
if ui == int(.5 * (1 - np.sign(llr))):
return last_pm
return last_pm + np.abs(llr)
def calculate_path_metric_vector(metric, llrs, us):
res = np.zeros(llrs.size)
res[0] = metric(0, llrs[0], us[0])
for i in range(1, llrs.size):
res[i] = metric(res[i - 1], llrs[i], us[i])
return res
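# Illustrative sketch with assumed toy values: for llrs = [1.5, -0.5, 2.0]
# and a decision path us = [0, 0, 1],
# calculate_path_metric_vector(path_metric_approx, np.array(llrs), np.array(us))
# adds a penalty of |llr| whenever the decided bit disagrees with the sign of
# the LLR, giving the running metrics [0.0, 0.5, 2.5].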
def test_1024_rate_1_code():
# effectively a Monte-Carlo simulation for channel polarization.
ntests = 10000
n = 256
k = n
transition_prob = 0.11
num_transitions = int(k * transition_prob)
frozenbits = np.zeros(n - k)
frozenbitposition = np.array((), dtype=int)
encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
channel_counter = np.zeros(k)
possible_indices = np.arange(n, dtype=int)
for i in range(ntests):
bits = np.random.randint(2, size=k)
tx = encoder.encode(bits)
np.random.shuffle(possible_indices)
tx[possible_indices[0:num_transitions]] = (tx[possible_indices[0:num_transitions]] + 1) % 2
rx = tx
recv = decoder.decode(rx)
channel_counter += (bits == recv)
print(channel_counter)
print(np.min(channel_counter), np.max(channel_counter))
np.save('channel_counter_' + str(ntests) + '.npy', channel_counter)
def find_good_indices(res, nindices):
channel_counter = np.copy(res)
good_indices = np.zeros(channel_counter.size)
for i in range(nindices):
idx = np.argmax(channel_counter)
good_indices[idx] = 1
channel_counter[idx] = 0
return good_indices
def channel_analysis():
ntests = 10000
filename = 'channel_counter_' + str(ntests) + '.npy'
channel_counter = np.load(filename)
print(np.min(channel_counter), np.max(channel_counter))
channel_counter[0] = np.min(channel_counter)
good_indices = find_good_indices(channel_counter, channel_counter.size // 2)
info_bit_positions = np.where(good_indices > 0)
print(info_bit_positions)
frozen_bit_positions = np.delete(np.arange(channel_counter.size), info_bit_positions)
print(frozen_bit_positions)
np.save('frozen_bit_positions_n256_k128_p0.11.npy', frozen_bit_positions)
good_indices *= 2000
good_indices += 4000
plt.plot(channel_counter)
plt.plot(good_indices)
plt.show()
def merge_first_stage(init_mask):
merged_frozen_mask = []
for e in range(0, len(init_mask), 2):
v = [init_mask[e]['value'][0], init_mask[e + 1]['value'][0]]
s = init_mask[e]['size'] * 2
if init_mask[e]['type'] == init_mask[e + 1]['type']:
t = init_mask[e]['type']
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
else:
t = 'RPT'
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
return merged_frozen_mask
def merge_second_stage(init_mask):
merged_frozen_mask = []
for e in range(0, len(init_mask), 2):
if init_mask[e]['type'] == init_mask[e + 1]['type']:
t = init_mask[e]['type']
v = init_mask[e]['value']
v.extend(init_mask[e + 1]['value'])
s = init_mask[e]['size'] * 2
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
elif init_mask[e]['type'] == 'ZERO' and init_mask[e + 1]['type'] == 'RPT':
t = init_mask[e + 1]['type']
v = init_mask[e]['value']
v.extend(init_mask[e + 1]['value'])
s = init_mask[e]['size'] * 2
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
elif init_mask[e]['type'] == 'RPT' and init_mask[e + 1]['type'] == 'ONE':
t = 'SPC'
v = init_mask[e]['value']
v.extend(init_mask[e + 1]['value'])
s = init_mask[e]['size'] * 2
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
else:
merged_frozen_mask.append(init_mask[e])
merged_frozen_mask.append(init_mask[e + 1])
return merged_frozen_mask
def merge_stage_n(init_mask):
merged_frozen_mask = []
n_elems = len(init_mask) - (len(init_mask) % 2)
for e in range(0, n_elems, 2):
if init_mask[e]['size'] == init_mask[e + 1]['size']:
if (init_mask[e]['type'] == 'ZERO' or init_mask[e]['type'] == 'ONE') and init_mask[e]['type'] == init_mask[e + 1]['type']:
t = init_mask[e]['type']
v = init_mask[e]['value']
v.extend(init_mask[e + 1]['value'])
s = init_mask[e]['size'] * 2
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
elif init_mask[e]['type'] == 'ZERO' and init_mask[e + 1]['type'] == 'RPT':
t = init_mask[e + 1]['type']
v = init_mask[e]['value']
v.extend(init_mask[e + 1]['value'])
s = init_mask[e]['size'] * 2
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
elif init_mask[e]['type'] == 'SPC' and init_mask[e + 1]['type'] == 'ONE':
t = init_mask[e]['type']
v = init_mask[e]['value']
v.extend(init_mask[e + 1]['value'])
s = init_mask[e]['size'] * 2
merged_frozen_mask.append({'value': v, 'type': t, 'size': s})
else:
merged_frozen_mask.append(init_mask[e])
merged_frozen_mask.append(init_mask[e + 1])
else:
merged_frozen_mask.append(init_mask[e])
merged_frozen_mask.append(init_mask[e + 1])
if n_elems < len(init_mask):
merged_frozen_mask.append(init_mask[-1])
return merged_frozen_mask
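# The merge_* helpers above collapse adjacent frozen/info pairs into the
# usual simplified-successive-cancellation node types: ZERO (all frozen),
# ONE (all info), RPT (repetition: frozen bits followed by a single info
# bit) and SPC (single parity check: a single frozen bit followed by info
# bits).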
def print_decode_subframes(subframes):
for e in subframes:
print(e)
def find_decoder_subframes(frozen_mask):
stages = power_of_2_int(len(frozen_mask))
block_size = 2 ** stages
lock_mask = np.zeros(block_size, dtype=int)
sub_mask = []
for e in frozen_mask:
if e == 1:
sub_mask.append(0)
else:
sub_mask.append(1)
sub_mask = np.array(sub_mask, dtype=int)
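    # Integer encoding used below (see the `words` lookup further down):
    # 0 = ZERO (all frozen), 1 = ONE (all info), 2 = RPT, 3 = SPC.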
for s in range(0, stages):
stage_size = 2 ** s
mask = np.reshape(sub_mask, (-1, stage_size))
lock = np.reshape(lock_mask, (-1, stage_size))
for p in range(0, (block_size // stage_size) - 1, 2):
l0 = lock[p]
l1 = lock[p + 1]
first = mask[p]
second = mask[p + 1]
print(l0, l1)
print(first, second)
if np.all(l0 == l1):
for eq in range(2):
if np.all(first == eq) and np.all(second == eq):
mask[p].fill(eq)
mask[p + 1].fill(eq)
lock[p].fill(s)
lock[p + 1].fill(s)
if np.all(first == 0) and np.all(second == 2):
mask[p].fill(2)
mask[p + 1].fill(2)
lock[p].fill(s)
lock[p + 1].fill(s)
if np.all(first == 3) and np.all(second == 1):
mask[p].fill(3)
mask[p + 1].fill(3)
lock[p].fill(s)
lock[p + 1].fill(s)
if s == 0 and np.all(first == 0) and np.all(second == 1):
mask[p].fill(2)
mask[p + 1].fill(2)
lock[p].fill(s)
lock[p + 1].fill(s)
if s == 1 and np.all(first == 2) and np.all(second == 1):
mask[p].fill(3)
mask[p + 1].fill(3)
lock[p].fill(s)
lock[p + 1].fill(s)
sub_mask = mask.flatten()
lock_mask = lock.flatten()
words = {0: 'ZERO', 1: 'ONE', 2: 'RPT', 3: 'SPC'}
ll = lock_mask[0]
sub_t = sub_mask[0]
for i in range(len(frozen_mask)):
v = frozen_mask[i]
t = words[sub_mask[i]]
l = lock_mask[i]
# if i % 8 == 0:
# print
if not l == ll or not sub_mask[i] == sub_t:
print('--------------------------')
ll = l
sub_t = sub_mask[i]
print('{0:4} lock {1:4} value: {2} in sub {3}'.format(i, 2 ** (l + 1), v, t))
def systematic_encoder_decoder_chain_test():
print('systematic encoder decoder chain test')
block_size = int(2 ** 8)
info_bit_size = block_size // 2
ntests = 100
frozenbitposition = cc.get_frozen_bit_indices_from_z_parameters(cc.bhattacharyya_bounds(0.0, block_size), block_size - info_bit_size)
encoder = PolarEncoder(block_size, info_bit_size, frozenbitposition)
decoder = PolarDecoder(block_size, info_bit_size, frozenbitposition)
for i in range(ntests):
bits = np.random.randint(2, size=info_bit_size)
y = encoder.encode_systematic(bits)
u_hat = decoder.decode_systematic(y)
assert (bits == u_hat).all()
def main():
n = 8
m = 2 ** n
k = m // 2
n_frozen = n - k
# n = 16
# k = 8
# frozenbits = np.zeros(n - k)
# frozenbitposition8 = np.array((0, 1, 2, 4), dtype=int)
# frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
# print(frozenbitposition)
# test_enc_dec_chain()
# test_1024_rate_1_code()
# channel_analysis()
# frozen_indices = cc.get_bec_frozen_indices(m, n_frozen, 0.11)
# frozen_mask = cc.get_frozen_bit_mask(frozen_indices, m)
# find_decoder_subframes(frozen_mask)
systematic_encoder_decoder_chain_test()
if __name__ == '__main__':
main()
| gpl-3.0 |
ldirer/scikit-learn | examples/exercises/plot_cv_diabetes.py | 27 | 2775 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
tuned_parameters = [{'alpha': alphas}]
n_folds = 3
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)
clf.fit(X, y)
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
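# scores_std is the fold-to-fold standard deviation of the CV score for each
# alpha; dividing by sqrt(n_folds) gives the standard error of the mean score
# plotted below.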
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
Quantipy/quantipy | quantipy/core/tools/dp/io.py | 1 | 11257 | import pandas as pd
import numpy as np
import json
import re
import copy
import itertools
import math
import re, string
import sqlite3
import sys
from ftfy import fix_text
from collections import OrderedDict
from quantipy.core.helpers.constants import DTYPE_MAP
from quantipy.core.helpers.constants import MAPPED_PATTERN
from itertools import product
from quantipy.core.tools.dp.dimensions.reader import quantipy_from_dimensions
from quantipy.core.tools.dp.dimensions.writer import dimensions_from_quantipy
from quantipy.core.tools.dp.decipher.reader import quantipy_from_decipher
from quantipy.core.tools.dp.spss.reader import parse_sav_file
from quantipy.core.tools.dp.spss.writer import save_sav
from quantipy.core.tools.dp.ascribe.reader import quantipy_from_ascribe
def make_like_ascii(text):
"""
    Replaces selected non-ASCII unicode characters (bullets, dashes, smart
    quotes, currency symbols, ellipses) with ASCII-friendly equivalents.
"""
unicode_ascii_mapper = {
u'\u2022': u'-', # http://www.fileformat.info/info/unicode/char/2022/index.htm
u'\u2013': u'-', # http://www.fileformat.info/info/unicode/char/2013/index.htm
u'\u2018': u'\u0027', # http://www.fileformat.info/info/unicode/char/2018/index.htm
u'\u2019': u'\u0027', # http://www.fileformat.info/info/unicode/char/2019/index.htm
u'\u201c': u'\u0022', # http://www.fileformat.info/info/unicode/char/201C/index.htm
u'\u201d': u'\u0022', # http://www.fileformat.info/info/unicode/char/201D/index.htm
u'\u00a3': u'GBP ', # http://www.fileformat.info/info/unicode/char/a3/index.htm
u'\u20AC': u'EUR ', # http://www.fileformat.info/info/unicode/char/20aC/index.htm
u'\u2026': u'\u002E\u002E\u002E', # http://www.fileformat.info/info/unicode/char/002e/index.htm
}
for old, new in unicode_ascii_mapper.iteritems():
text = text.replace(old, new)
return text
def unicoder(obj, decoder='UTF-8', like_ascii=False):
"""
Decodes all the text (keys and strings) in obj.
Recursively mines obj for any str objects, whether keys or values,
converting any str objects to unicode and then correcting the
unicode (which may have been decoded incorrectly) using ftfy.
Parameters
----------
obj : object
The object to be mined.
Returns
-------
obj : object
The recursively decoded object.
"""
if isinstance(obj, list):
obj = [
unicoder(item, decoder, like_ascii)
for item in obj]
if isinstance(obj, tuple):
obj = tuple([
unicoder(item, decoder, like_ascii)
for item in obj])
elif isinstance(obj, (dict)):
obj = {
key: unicoder(value, decoder, like_ascii)
for key, value in obj.iteritems()}
elif isinstance(obj, str):
obj = fix_text(unicode(obj, decoder))
elif isinstance(obj, unicode):
obj = fix_text(obj)
if like_ascii and isinstance(obj, unicode):
obj = make_like_ascii(obj)
return obj
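# Illustrative sketch (assumed toy input): unicoder walks nested containers
# and repairs the text values it finds, e.g. a UTF-8 byte string such as
# 'caf\xc3\xa9' inside a list or dict value comes back as u'caf\xe9'; the
# exact result depends on ftfy's fix_text and the decoder in use.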
def encoder(obj, encoder='UTF-8'):
"""
Encodes all the text (keys and strings) in obj.
Recursively mines obj for any str objects, whether keys or values,
encoding any str objects.
Parameters
----------
obj : object
The object to be mined.
Returns
-------
obj : object
        The recursively encoded object.
"""
if isinstance(obj, list):
obj = [
unicoder(item)
for item in obj
]
if isinstance(obj, tuple):
obj = tuple([
unicoder(item)
for item in obj
])
elif isinstance(obj, (dict)):
obj = {
key: unicoder(value)
for key, value in obj.iteritems()
}
elif isinstance(obj, str):
        obj = obj.encode(encoder)
return obj
def enjson(obj, indent=4, encoding='UTF-8'):
"""
Dumps unicode json allowing non-ascii characters encoded as needed.
"""
return json.dumps(obj, indent=indent, ensure_ascii=False).encode(encoding)
def load_json(path_json, hook=OrderedDict):
''' Returns a python object from the json file located at path_json
'''
with open(path_json) as f:
obj = unicoder(json.load(f, object_pairs_hook=hook))
return obj
def loads_json(json_text, hook=OrderedDict):
''' Returns a python object from the json string json_text
'''
obj = json.loads(json_text, object_pairs_hook=hook)
return obj
def load_csv(path_csv):
data = pd.DataFrame.from_csv(path_csv)
return data
def save_json(obj, path_json, decode_str=False, decoder='UTF-8'):
if decode_str:
obj = unicoder(obj, decoder)
def represent(obj):
if isinstance(obj, np.generic):
return np.asscalar(obj)
else:
return "Unserializable object: %s" % (str(type(obj)))
with open(path_json, 'w+') as f:
json.dump(obj, f, default=represent, sort_keys=True)
def df_to_browser(df, path_html='df.html', **kwargs):
import webbrowser
with open(path_html, 'w') as f:
f.write(df.to_html(**kwargs))
webbrowser.open(path_html, new=2)
def verify_dtypes_vs_meta(data, meta):
''' Returns a df showing the pandas dtype for each column in data compared
    to the type indicated for that variable name in meta, plus a 'verified'
    column indicating whether quantipy considers the two types compatible.
data - (pandas.DataFrame)
meta - (dict) quantipy meta object
'''
dtypes = data.dtypes
dtypes.name = 'dtype'
var_types = pd.DataFrame({k: v['type'] for k, v in meta['columns'].iteritems()}, index=['meta']).T
df = pd.concat([var_types, dtypes.astype(str)], axis=1)
missing = df.loc[df['dtype'].isin([np.NaN])]['meta']
if missing.size>0:
print '\nSome meta not paired to data columns was found (these may be special data types):\n', missing, '\n'
df = df.dropna(how='any')
df['verified'] = df.apply(lambda x: x['dtype'] in DTYPE_MAP[x['meta']], axis=1)
return df
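# Illustrative sketch (names taken from the function above): the returned
# frame is indexed by column name with 'meta', 'dtype' and 'verified'
# columns, so e.g.
#
#     verify_dtypes_vs_meta(data, meta)['verified'].all()
#
# gives a quick yes/no check that data and meta agree.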
def coerce_dtypes_from_meta(data, meta):
data = data.copy()
verified = verify_dtypes_vs_meta(data, meta)
for idx in verified[~verified['verified']].index:
meta = verified.loc[idx]['meta']
dtype = verified.loc[idx]['dtype']
if meta in ["int", "single"]:
if dtype in ["object"]:
data[idx] = data[idx].convert_objects(convert_numeric=True)
data[idx] = data[idx].replace(np.NaN, 0).astype(int)
return data
def read_ddf(path_ddf, auto_index_tables=True):
''' Returns a raw version of the DDF in the form of a dict of
pandas DataFrames (one for each table in the DDF).
Parameters
----------
path_ddf : string, the full path to the target DDF
auto_index_tables : boolean (optional)
if True, will set the index for all returned DataFrames using the most
meaningful candidate column available. Columns set into the index will
not be dropped from the DataFrame.
Returns
----------
dict of pandas DataFrames
'''
# Read in the DDF (which is a sqlite file) and retain all available
# information in the form of pandas DataFrames.
with sqlite3.connect(path_ddf) as conn:
ddf = {}
ddf['sqlite_master'] = pd.read_sql(
'SELECT * FROM sqlite_master;',
conn
)
ddf['tables'] = {
table_name:
pd.read_sql('SELECT * FROM %s;' % (table_name), conn)
for table_name in ddf['sqlite_master']['tbl_name'].values
if table_name.startswith('L')
}
ddf['table_info'] = {
table_name:
pd.read_sql("PRAGMA table_info('%s');" % (table_name), conn)
for table_name in ddf['tables'].keys()
}
# If required, set the index for the expected Dataframes that should
# result from the above operation.
if auto_index_tables:
try:
ddf['sqlite_master'].set_index(
['name'],
drop=False,
inplace=True
)
except:
print (
"Couldn't set 'name' into the index for 'sqlite_master'."
)
for table_name in ddf['table_info'].keys():
try:
ddf['table_info'][table_name].set_index(
['name'],
drop=False,
inplace=True
)
except:
print (
"Couldn't set 'name' into the index for '%s'."
) % (table_name)
for table_name in ddf['tables'].keys():
index_col = 'TableName' if table_name=='Levels' else ':P0'
try:
                ddf['tables'][table_name].set_index(
                    [index_col],
drop=False,
inplace=True
)
except:
print (
"Couldn't set '%s' into the index for the '%s' "
"Dataframe."
) % (index_col, table_name)
return ddf
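# Illustrative sketch (the path is an assumed placeholder): reading a DDF
# returns a dict holding the sqlite master table, the raw data tables and
# their table_info:
#
#     ddf = read_ddf('my_project.ddf')
#     ddf.keys()  # ['sqlite_master', 'tables', 'table_info'] (in some order)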
def read_dimensions(path_mdd, path_ddf):
meta, data = quantipy_from_dimensions(path_mdd, path_ddf)
return meta, data
def write_dimensions(meta, data, path_mdd, path_ddf, text_key=None,
CRLF="CR", run=True, clean_up=True,
reuse_mdd=False):
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.setdefaultencoding("cp1252")
sys.stdout = default_stdout
sys.stderr = default_stderr
out = dimensions_from_quantipy(
meta, data, path_mdd, path_ddf, text_key, CRLF, run,
clean_up, reuse_mdd
)
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.setdefaultencoding("utf-8")
sys.stdout = default_stdout
sys.stderr = default_stderr
return out
def read_decipher(path_json, path_txt, text_key='main'):
meta, data = quantipy_from_decipher(path_json, path_txt, text_key)
return meta, data
def read_spss(path_sav, **kwargs):
meta, data = parse_sav_file(path_sav, **kwargs)
return meta, data
def write_spss(path_sav, meta, data, index=True, text_key=None,
mrset_tag_style='__', drop_delimited=True, from_set=None,
verbose=False):
save_sav(
path_sav,
meta,
data,
index=index,
text_key=text_key,
mrset_tag_style=mrset_tag_style,
drop_delimited=drop_delimited,
from_set=from_set,
verbose=verbose
)
def read_ascribe(path_xml, path_txt, text_key='main'):
meta, data = quantipy_from_ascribe(path_xml, path_txt, text_key)
return meta, data
def read_quantipy(path_json, path_csv):
"""
Load Quantipy meta and data from disk.
"""
meta = load_json(path_json)
data = load_csv(path_csv)
for col in meta['columns'].keys():
if meta['columns'][col]['type']=='date':
data[col] = pd.to_datetime(data[col])
return meta, data
def write_quantipy(meta, data, path_json, path_csv):
"""
Save Quantipy meta and data to disk.
"""
save_json(meta, path_json)
data.to_csv(path_csv)
| mit |
gsi-upm/senpy | example-plugins/sklearn/mypipeline.py | 1 | 1667 | #
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from mydata import text, labels
X_train, X_test, y_train, y_test = train_test_split(text, labels, test_size=0.12, random_state=42)
from sklearn.naive_bayes import MultinomialNB
count_vec = CountVectorizer(tokenizer=lambda x: x.split())
clf3 = MultinomialNB()
pipeline = Pipeline([('cv', count_vec),
('clf', clf3)])
pipeline.fit(X_train, y_train)
print('Feature names: {}'.format(count_vec.get_feature_names()))
print('Class count: {}'.format(clf3.class_count_))
if __name__ == '__main__':
print('--Results--')
tests = [
(['The sentiment for senpy should be positive :)', ], 1),
(['The sentiment for anything else should be negative :()', ], -1)
]
for features, expected in tests:
result = pipeline.predict(features)
print('Input: {}\nExpected: {}\nGot: {}'.format(features[0], expected, result))
| apache-2.0 |
ilo10/scikit-learn | sklearn/tests/test_cross_validation.py | 70 | 41943 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be 3d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
    # stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
lquirosd/TFM | ILA/code/predictLayout.py | 1 | 8608 | from __future__ import division
import numpy as np
import scipy.ndimage as ndi
from sklearn import mixture
def twoPointStencil2D(data, h=1):
"""
    Compute two-point stencil (central difference) on each axis:

        f'(x) = (f(x+h) - f(x-h)) / (2*h)  =  1Dconvolve([1, 0, -1]) / (2*h)

    Handle borders using one-sided stencils:

        f'(x) = (f(x+h) - f(x)) / h    at the first sample
        f'(x) = (f(x) - f(x-h)) / h    at the last sample
"""
der = np.zeros((data.shape[0], data.shape[1],2))
der[:,:,0] = ndi.convolve1d(data, [1, 0, -1], axis=0, mode= 'nearest')/(2*h)
der[:,:,1] = ndi.convolve1d(data, [1, 0, -1], axis=1, mode= 'nearest')/(2*h)
#--- Handle rows border
der[0,:,0] = (data[1,:] - data[0,:])/h
der[-1,:,0] = (data[-1,:] - data[-2,:])/h
#--- handle colums border
der[:,0,1] = (data[:,1] - data[:,0])/h
der[:,-1,1] = (data[:,-1] - data[:,-2])/h
return der
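# Example (for illustration): on a linear ramp the stencil is exact. With
# data = np.arange(12, dtype=float).reshape(3, 4), twoPointStencil2D(data)
# returns 4.0 everywhere in der[:, :, 0] (rows step by 4) and 1.0 everywhere
# in der[:, :, 1] (columns step by 1), borders included thanks to the
# one-sided differences.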
def derGMMmodel(GMMmodel, UB):
"""
    Compute derivatives of the GMM model with respect to each corner as:

                 sum( W * N(x, \mu, \Sigma) * (x - \mu).T inv(\Sigma) )
        f'(x) = --------------------------------------------------------
                 sum( W * N(x, \mu, \Sigma) )
"""
outUB = UB
U = UB[0:2]
B = UB[2:4]
#--- Compute deriv respect to Upper corner
denU = np.exp(GMMmodel['Upper'].score(U.reshape(1,-1)))
numU = np.sum(
np.exp(
                mixture.log_multivariate_normal_density(
                    U.reshape(1, -1),  # evaluation point x: the Upper corner
                    GMMmodel['Upper'].means_,
                    GMMmodel['Upper'].covars_,
                    GMMmodel['Upper'].covariance_type)
)
* GMMmodel['Upper'].weights_
                * (GMMmodel['Upper'].means_ - U).T
* np.linalg.inv(GMMmodel['Upper'].covars_),
axis=0
)
outUB[0:2] = numU/denU
#--- Compute deriv respect to Bottom corner
denB = np.exp(GMMmodel['Bottom'].score(B.reshape(1,-1)))
numB = np.sum(
np.exp(
                mixture.log_multivariate_normal_density(
                    B.reshape(1, -1),  # evaluation point x: the Bottom corner
                    GMMmodel['Bottom'].means_,
                    GMMmodel['Bottom'].covars_,
                    GMMmodel['Bottom'].covariance_type)
)
* GMMmodel['Bottom'].weights_
                * (GMMmodel['Bottom'].means_ - B).T
* np.linalg.inv(GMMmodel['Bottom'].covars_),
axis=0
)
outUB[2:4] = numB/denB
return outUB
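# Note: the ratio computed above appears to implement the gradient of the GMM
# log-density evaluated at each corner,
#   grad_x log p(x) = sum_k w_k N(x; mu_k, Sigma_k) inv(Sigma_k) (mu_k - x) / p(x),
# with the Upper and Bottom corners handled independently.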
def computeII(data):
"""
    Computes the Integral Image as defined in
Lewis, J.P. (1995). Fast template matching. Proc. Vision Interface
"""
return data.cumsum(axis=0).cumsum(axis=1)
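# Example (for illustration): computeII(np.ones((3, 3))) returns
#   [[1, 2, 3],
#    [2, 4, 6],
#    [3, 6, 9]]
# i.e. II[r, c] holds the sum of data[0:r+1, 0:c+1].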
def getIIsum(data, U, B):
"""
    Compute the summed area of the rectangle with upper-left corner U and
    bottom-right corner B from the integral image:

        A = U               Bi = (U[0], B[1])
          +------------------+
          |                  |
          |                  |
          +------------------+
        C = (B[0], U[1])     D = B

        sum = I(D) + I(A) - I(Bi) - I(C)
"""
if (U == B):
return data[U]
else:
return (data[B] + data[U]) - (data[U[0], B[1]] + data[B[0], U[1]])
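# Note: with the identity above, getIIsum(II, U, B) returns the sum over rows
# U[0]+1..B[0] and columns U[1]+1..B[1], so the top row and left column of the
# window are excluded. Example: with II = computeII(np.ones((4, 4))),
# getIIsum(II, (1, 1), (3, 3)) == 16 + 4 - 8 - 8 == 4.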
def computeLogProb(P1II, P0II, Qmodel, UB):
"""
    Compute the layout log-probability as:

        P(L) = sum_{k=1..K} [ sum_{d=1..|S_k|} log P(s_d|h)
                              + sum_{d=1..|~S_k|} log P(s_d|h) ] + log P(h)

        log P(h) = log{ P(u) P(b) } = log P(u) + log P(b)

    where each inner sum is computed using the Integral Image.
"""
U = UB[0:2]
B = UB[2:4]
#qProb = Qmodel['Upper'].score(U.reshape(1,-1)) + \
# Qmodel['Bottom'].score(B.reshape(1,-1))
pProb1 = getIIsum(P1II, (U[0], U[1]), (B[0], B[1]))
pProb0 = P0II[-1,-1] - getIIsum(P0II, (U[0], U[1]), (B[0], B[1]))
return pProb1 + pProb0 #+ qProb
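# Note: pProb1 sums the class-1 log-probabilities inside the candidate box via
# its integral image, while pProb0 takes the total class-0 mass minus the same
# rectangle, i.e. the class-0 contribution from everything outside the box.
# The GMM prior term (qProb) is currently commented out.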
def derP1(II, UB):
dUr = (getIIsum(II, (UB[0]+1, UB[1]), (UB[2],UB[3])) - getIIsum(II, (UB[0]-1, UB[1]), (UB[2],UB[3])))/2
dUc = (getIIsum(II, (UB[0], UB[1]+1), (UB[2],UB[3])) - getIIsum(II, (UB[0], UB[1]-1), (UB[2],UB[3])))/2
dBr = (getIIsum(II, (UB[0], UB[1]), (UB[2]+1,UB[3])) - getIIsum(II, (UB[0], UB[1]), (UB[2]-1,UB[3])))/2
dBc = (getIIsum(II, (UB[0], UB[1]), (UB[2],UB[3]+1)) - getIIsum(II, (UB[0], UB[1]), (UB[2],UB[3]-1)))/2
return np.array([dUr, dUc, dBr, dBc])
def derP0(II, UB):
all0 = 2*II[-1,-1]
dUr = (all0 - getIIsum(II, (UB[0]+1, UB[1]), (UB[2],UB[3])) + getIIsum(II, (UB[0]-1, UB[1]), (UB[2],UB[3])))/2
dUc = (all0 - getIIsum(II, (UB[0], UB[1]+1), (UB[2],UB[3])) + getIIsum(II, (UB[0], UB[1]-1), (UB[2],UB[3])))/2
dBr = (all0 - getIIsum(II, (UB[0], UB[1]), (UB[2]+1,UB[3])) + getIIsum(II, (UB[0], UB[1]), (UB[2]-1,UB[3])))/2
dBc = (all0 - getIIsum(II, (UB[0], UB[1]), (UB[2],UB[3]+1)) + getIIsum(II, (UB[0], UB[1]), (UB[2],UB[3]-1)))/2
return np.array([dUr, dUc, dBr, dBc])
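# Note: derP1/derP0 approximate the partial derivatives of the box sums with
# respect to the four corner coordinates (Ur, Uc, Br, Bc) using central
# differences with step h=1, mirroring the two-point stencil above.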
def predictLayout(P1II, P0II, Qmodel, init=np.zeros(4), thr=0.001, T=100, alpha=0.1):
deltaLogProb = np.Inf
prevLogProb = 99999999999
bestUB = init
#--- Init Step
thisUB = init
bestLogProb = computeLogProb(P1II, P0II, Qmodel, thisUB)
#--- Iterate "T" times or until converge
for i in np.arange(T):
#thisUB = thisUB - (alpha * \
# (derPmodelII[thisUB[[0,2]],
# thisUB[[1,3]],:].flatten() + \
# derQmodel(Qmodel, thisUB)))
thisUB = thisUB - (
0.00001 * \
(
derP1(P1II, thisUB) + derP0(P0II, thisUB) #+ derGMMmodel(Qmodel, thisUB)
)
).astype(int)
print thisUB
logProb = computeLogProb(P1II, P0II, Qmodel, thisUB)
print "Iteration: {0:}, LogProb= {1:}".format(i, logProb)
#deltaLogProb = np.abs(logProb - prevLogProb)
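        # Note: deltaLogProb is never updated (the line above is commented out),
        # so it stays at np.Inf and the convergence break below never triggers;
        # the loop always runs for the full T iterations.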
prevLogProb = logProb
if (logProb > bestLogProb):
bestLogProb = logProb
bestUB = thisUB
if(deltaLogProb <= thr):
#--- Alg is converged, the get out of here!!!
print "hola"
break
return bestUB
def _testModule():
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
try:
import cPickle as pickle
except:
import pickle as pickle
EPS = np.finfo(float).eps
fh = open("/home/lorenzoqd/TFM/ILA/models/CRFs/_z0.3_w32_g3/GMM_22_z0.3_w32_g3_u2_b3_model.pickle",'r')
Qmodel = pickle.load(fh)
fh.close()
P = np.loadtxt('/home/lorenzoqd/TFM/ILA/models/CRFs/_z0.3_w32_g3/test_pos/bla.txt')
P1 = P[:,1].copy()
P0 = P[:,1].copy()
P1[P[:,0]==0] = 1 - P1[P[:,0]==0]
P0[P[:,0]==1] = 1 - P1[P[:,0]==1]
P1 = np.log(P1 + EPS).reshape(365,230)
P0 = np.log(P0 + EPS).reshape(365,230)
#Pmodel = np.log(P1)
#Pmodel0 = Pmodel.copy()
#Pmodel1 = Pmodel.copy()
#Pmodel1[P[:,0]==0] = 0
#Pmodel1 = Pmodel1.reshape(365,230)
#Pmodel0[P[:,0]==1] = 0
#Pmodel0 = Pmodel0.reshape(365,230)
T = 100
thr = 0.1 #--- keep hight for test only
alpha = 0.1
#--- Test computeII -> OK
P1II = computeII(P1)
P0II = computeII(P0)
fig, ax = plt.subplots(nrows=1, ncols=2)
ax[0].axis('off')
ax[0].imshow(P1, cmap=cm.coolwarm)
ax[1].axis('off')
ax[1].imshow(P0, cmap=cm.coolwarm)
fig.savefig('testP.png', bbox_inches='tight')
plt.close(fig)
fig1, ax1 = plt.subplots(nrows=1, ncols=2)
ax1[0].axis('off')
ax1[0].imshow(P1II, cmap=cm.coolwarm)
ax1[1].axis('off')
ax1[1].imshow(P0II, cmap=cm.coolwarm)
fig1.savefig('testII.png', bbox_inches='tight')
plt.close(fig1)
uc = 0
br = 364
bc = 229
all0 = getIIsum(P0II, (0,0), (364,229))
der = np.zeros((365,230))
for r in np.arange(5,360,1):
for c in np.arange(5,225,1):
der[r,c] = ((getIIsum(P1II, (r+1, c+1),(br, bc)) - getIIsum(P1II, (r-1, c-1),(br, bc)))/2) + \
(((all0 - getIIsum(P0II, (r+1,c-1),(br, bc)))-(all0 - getIIsum(P0II, (r-1,c+1), (br,bc))))/2)
fig2, ax2 = plt.subplots(nrows=1, ncols=1)
ax2.axis('off')
im = ax2.imshow(der, cmap=cm.coolwarm)
fig2.colorbar(im)
fig2.savefig('testIIder.png', bbox_inches='tight')
print computeLogProb(P1II, P0II, Qmodel, np.array([100,80,200,180]))
OUT = predictLayout(P1II, P0II, Qmodel, init=np.array([100,80,200,180]), thr=thr, T=T, alpha=alpha)
#OUT = predictLayout(init=np.array([100, 80, 200, 180]),
# P1II=P1II, P0II=P0II,
# Qmodel=Qmodel,
# thr=thr, T=T, alpha=alpha)
print OUT
print "test"
if __name__ == '__main__':
_testModule()
| apache-2.0 |
sam81/pychoacoustics | pychoacoustics/response_box.py | 1 | 438232 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2020 Samuele Carcagno <[email protected]>
# This file is part of pychoacoustics
# pychoacoustics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pychoacoustics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pychoacoustics. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .pyqtver import*
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt, QEvent, QThread, QDate, QRegExp, QTime, QDateTime, QRect
    from PyQt4.QtGui import QAction, QApplication, QComboBox, QDesktopWidget, QDoubleValidator, QFileDialog, QFont, QFrame, QGridLayout, QIcon, QInputDialog, QIntValidator, QLabel, QLineEdit, QMainWindow, QMessageBox, QPainter, QProgressBar, QPushButton, QScrollArea, QShortcut, QSizePolicy, QSpacerItem, QValidator, QVBoxLayout, QWidget, QWidgetItem
QFileDialog.getOpenFileName = QFileDialog.getOpenFileNameAndFilter
QFileDialog.getOpenFileNames = QFileDialog.getOpenFileNamesAndFilter
QFileDialog.getSaveFileName = QFileDialog.getSaveFileNameAndFilter
try:
import matplotlib
matplotlib_available = True
matplotlib.rcParams['backend'] = "Qt4Agg"
matplotlib.rcParams['backend.qt4'] = "PyQt4"
except:
matplotlib_available = False
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtCore import Qt, QEvent, QThread, QDate, QRegExp, QTime, QDateTime, QRect
    from PySide.QtGui import QAction, QApplication, QComboBox, QDesktopWidget, QDoubleValidator, QFileDialog, QFont, QFrame, QGridLayout, QIcon, QInputDialog, QIntValidator, QLabel, QLineEdit, QMainWindow, QMessageBox, QPainter, QProgressBar, QPushButton, QScrollArea, QShortcut, QSizePolicy, QSpacerItem, QValidator, QVBoxLayout, QWidget, QWidgetItem
try:
import matplotlib
matplotlib_available = True
matplotlib.rcParams['backend'] = "Qt4Agg"
matplotlib.rcParams['backend.qt4'] = "PySide"
except:
matplotlib_available = False
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import Qt, QEvent, QThread, QDate, QRegExp, QTime, QDateTime, QRect
from PyQt5.QtWidgets import QAction, QApplication, QComboBox, QDesktopWidget, QFileDialog, QFrame, QGridLayout, QInputDialog, QLabel, QLineEdit, QMainWindow, QMessageBox, QProgressBar, QPushButton, QScrollArea, QShortcut, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget, QWidgetItem
    from PyQt5.QtGui import QDoubleValidator, QFont, QIcon, QIntValidator, QPainter, QValidator
try:
import matplotlib
matplotlib_available = True
matplotlib.rcParams['backend'] = "Qt5Agg"
except:
matplotlib_available = False
try:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
matplotlib_available = True
except:
matplotlib_available = False
from numpy.fft import rfft, irfft, fft, ifft
import base64, fnmatch, copy, numpy, os, platform, random, string, smtplib, sys, time
from numpy import abs, array, concatenate, exp, float64, log, log10, nan, mean, repeat, std
from .utils_general import*
from .stats_utils import*
from .pysdt import*
from scipy.stats.distributions import norm
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email import encoders
from .audio_manager import*
from .dialog_show_instructions import*
from .stats_utils import*
from .sndlib import*
from .utils_general import*
from .utils_process_results import*
from .PSI_method import*
from .PSI_method_est_guess import setupPSIEstGuessRate, PSIEstGuessRate_update
from .UML_method import*
from .UML_method_est_guess import setupUMLEstGuessRate, UMLEstGuessRate_update
try:
import pandas
pandas_available = True
except:
pandas_available = False
if matplotlib_available and pandas_available:
from .win_categorical_plot import*
from . import default_experiments
homeExperimentsPath = os.path.normpath(os.path.expanduser("~") +'/pychoacoustics_exp/')
if os.path.exists(os.path.normpath(homeExperimentsPath + '/labexp/__init__.py')) == True:
sys.path.append(homeExperimentsPath)
try:
import labexp
from labexp import*
labexp_exists = True
except:
labexp_exists = False
class responseBox(QMainWindow):
def __init__(self, parent):
QMainWindow.__init__(self, parent)
self.emailThread = emailSender(self)
self.executerThread = commandExecuter(self)
self.playThread = threadedPlayer(self)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint)
#self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowSystemMenuHint)
self.setWindowModality(Qt.NonModal)
self.prm = parent.prm
self.audioManager = parent.audioManager
self.currLocale = self.parent().prm['currentLocale']
self.currLocale.setNumberOptions(self.currLocale.OmitGroupSeparator | self.currLocale.RejectGroupSeparator)
self.setWindowTitle(self.tr('Response Box'))
self.responseBoxButtonFont = QFont()
self.responseBoxButtonFont.fromString(self.prm["pref"]["resp_box"]["responseBoxButtonFont"])
#self.setStyleSheet("QPushButton[responseBoxButton='true'] {font-weight:bold; font-size: %spx;} " % self.prm['pref']['interface']['responseButtonSize'])
self.menubar = self.menuBar()
#FILE MENU
self.fileMenu = self.menubar.addMenu(self.tr('-'))
## for some reason couldn't get these into translation files in any other way
foo = self.prm['rbTrans'].translate('rb', "CORRECT")
foo = self.prm['rbTrans'].translate('rb', "INCORRECT")
foo = self.prm['rbTrans'].translate('rb', "DONE")
foo = self.prm['rbTrans'].translate('rb', "")
##
self.toggleControlWin = QAction(self.tr('Show/Hide Control Window'), self)
self.toggleControlWin.setShortcut('Ctrl+C')
self.toggleControlWin.setCheckable(True)
#self.toggleControlWin.setStatusTip(self.tr('Toggle Control Window'))
self.toggleControlWin.triggered.connect(self.onToggleControlWin)
if self.prm['hideWins'] == True:
self.toggleControlWin.setChecked(False)
else:
self.toggleControlWin.setChecked(True)
self.toggleGauge = QAction(self.tr('Show/Hide Progress Bar'), self)
self.toggleGauge.setShortcut('Ctrl+P')
self.toggleGauge.setCheckable(True)
self.toggleGauge.triggered.connect(self.onToggleGauge)
self.toggleBlockGauge = QAction(self.tr('Show/Hide Block Progress Bar'), self)
self.toggleBlockGauge.setShortcut('Ctrl+B')
self.toggleBlockGauge.setCheckable(True)
self.toggleBlockGauge.triggered.connect(self.onToggleBlockGauge)
#self.toggleBlockGauge.setChecked(True)
#self.statusBar()
self.fileMenu.addAction(self.toggleControlWin)
self.fileMenu.addAction(self.toggleGauge)
self.fileMenu.addAction(self.toggleBlockGauge)
#HELP MENU
self.helpMenu = self.menubar.addMenu(self.tr('&Help'))
self.showInstructions = QAction(self.tr('Show Task Instructions'), self)
self.showInstructions.triggered.connect(self.onClickShowInstructions)
self.helpMenu.addAction(self.showInstructions)
self.rb = QFrame()
self.rb.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
self.rb_sizer = QVBoxLayout()
self.intervalSizer = QGridLayout()
self.responseButtonSizer = QGridLayout()
self.RBTaskLabel = QLabel(self.parent().taskLabelTF.text())
self.RBTaskLabel.setAlignment(Qt.AlignCenter)
self.statusButton = QPushButton(self.prm['rbTrans'].translate('rb', "Wait"), self)
self.statusButton.clicked.connect(self.onClickStatusButton)
self.statusButton.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#self.statusButton.setProperty("responseBoxButton", True)
self.statusButton.setFont(self.responseBoxButtonFont)
self.statBtnShortcut = QShortcut("Ctrl+R", self, activated = self.onClickStatusButton)
self.statusButton.setToolTip(self.tr("Press Ctrl+R to activate"))
self.responseLight = responseLight(self)
self.gauge = QProgressBar(self)
self.gauge.setRange(0, 100)
self.blockGauge = QProgressBar(self)
self.rb_sizer.addWidget(self.RBTaskLabel)
self.rb_sizer.addWidget(self.statusButton)
self.rb_sizer.addSpacing(20)
self.rb_sizer.addWidget(self.responseLight)
self.rb_sizer.addSpacing(20)
self.intervalLight = []
self.responseButton = []
self.setupLights()
self.rb_sizer.addLayout(self.intervalSizer)
self.rb_sizer.addSpacing(5)
self.rb_sizer.addLayout(self.responseButtonSizer)
self.rb_sizer.addSpacing(20)
self.rb_sizer.addWidget(self.gauge)
self.rb_sizer.addWidget(self.blockGauge)
if self.prm['progbar'] == True:
self.toggleGauge.setChecked(True)
self.onToggleGauge()
else:
self.toggleGauge.setChecked(False)
self.onToggleGauge()
if self.prm["pref"]["general"]["showBlockProgBar"] == True:
self.toggleBlockGauge.setChecked(True)
self.onToggleBlockGauge()
else:
self.toggleBlockGauge.setChecked(False)
self.onToggleBlockGauge()
if self.prm['blockProgbar'] == True:
self.toggleBlockGauge.setChecked(True)
self.onToggleBlockGauge()
self.rb.setLayout(self.rb_sizer)
self.setCentralWidget(self.rb)
if self.prm['startMinimized'] == True:
self.showMinimized()
else:
self.show()
self.prm['listener'] = self.parent().listenerTF.text()
self.prm['sessionLabel'] = self.parent().sessionLabelTF.text()
if self.prm['hideWins'] == True:
self.parent().hide()
# def clearLayout(self, layout):
# #http://stackoverflow.com/questions/9374063/pyqt4-remove-widgets-and-layout-as-well
# for i in reversed(range(layout.count())):
# item = layout.itemAt(i)
# layout.removeItem(item)
# if isinstance(item, QWidgetItem):
# #item.widget().close()
# # or
# item.widget().setParent(None)
# elif isinstance(item, QSpacerItem):
# pass
# # no need to do extra stuff
# else:
# self.clearLayout(item.layout())
# # remove the item from layout
def clearLayout(self, layout):
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearLayout(item.layout())
def setupLights(self):
nIntervals = self.prm['nIntervals']
nResponseIntervals = nIntervals
#remove previous lights and buttons
self.clearLayout(self.intervalSizer)
self.intervalLight = []
self.clearLayout(self.responseButtonSizer)
self.responseButton = []
n = 0
if self.prm["warningInterval"] == True:
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
if self.prm[self.parent().currExp]["hasAlternativesChooser"] == True:
nAlternatives = self.currLocale.toInt(self.parent().nAlternativesChooser.currentText())[0]
else:
nAlternatives = nIntervals
screen = QDesktopWidget().screenGeometry()
if self.parent().currExp == self.tr("Coordinate Response Measure"):
self.statusButton.setMaximumSize(screen.width(), screen.height()/15)
self.responseLight.setMaximumSize(screen.width(), screen.height()/10)
self.statusButton.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self.responseLight.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
cols = ["cornflowerblue", "red", "white", "green"]
cnt = 0
for cl in range(len(cols)):
for rw in range(4):
self.responseButton.append(QPushButton(str(rw+1), self))
self.responseButtonSizer.addWidget(self.responseButton[cnt], rw, cl)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.responseButton[cnt].setSizePolicy(sizePolicy)
#self.responseButton[cnt].setProperty("responseBoxButton", True)
self.responseButton[cnt].setFont(self.responseBoxButtonFont)
self.responseButton[cnt].clicked.connect(self.sortResponseButton)
self.responseButton[cnt].setFocusPolicy(Qt.NoFocus)
self.responseButton[cnt].setStyleSheet("background-color: " + cols[cl])
cnt = cnt+1
elif self.parent().currExp in [self.tr("Digit Triplets Test"), self.tr("Digit Span")]:
self.statusButton.setMaximumSize(screen.width(), screen.height()/15)
self.responseLight.setMaximumSize(screen.width(), screen.height()/10)
self.statusButton.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self.responseLight.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
cnt = 0
self.responseButton.append(QPushButton("0", self))
self.responseButtonSizer.addWidget(self.responseButton[cnt], 3, 1)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.responseButton[cnt].setSizePolicy(sizePolicy)
self.responseButton[cnt].setFont(self.responseBoxButtonFont)
self.responseButton[cnt].clicked.connect(self.dialerButtonClicked)
self.responseButton[cnt].setFocusPolicy(Qt.NoFocus)
cnt = cnt+1
for rw in range(3):
for cl in range(3):
self.responseButton.append(QPushButton(str(cnt), self))
self.responseButtonSizer.addWidget(self.responseButton[cnt], rw, cl)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.responseButton[cnt].setSizePolicy(sizePolicy)
self.responseButton[cnt].setFont(self.responseBoxButtonFont)
self.responseButton[cnt].clicked.connect(self.dialerButtonClicked)
self.responseButton[cnt].setFocusPolicy(Qt.NoFocus)
cnt = cnt+1
self.responseButton.append(QPushButton(self.tr("Backspace"), self))
self.responseButtonSizer.addWidget(self.responseButton[cnt], 3, 0)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.responseButton[cnt].setSizePolicy(sizePolicy)
self.responseButton[cnt].setFont(self.responseBoxButtonFont)
self.responseButton[cnt].clicked.connect(self.backspaceButtonPressed)
self.responseButton[cnt].setFocusPolicy(Qt.NoFocus)
cnt = cnt+1
self.responseButton.append(QPushButton(self.tr("Enter"), self))
self.responseButtonSizer.addWidget(self.responseButton[cnt], 3, 2)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.responseButton[cnt].setSizePolicy(sizePolicy)
self.responseButton[cnt].setFont(self.responseBoxButtonFont)
self.responseButton[cnt].clicked.connect(self.enterButtonPressed)
self.responseButton[cnt].setFocusPolicy(Qt.NoFocus)
self.dialerResponseField = QLineEdit("")
if self.parent().currExp == self.tr("Digit Triplets Test"): #only three max digits
self.dialerResponseField.setValidator(QIntValidator(0, 999, self))
else:
self.dialerResponseField.setValidator(ValidDigitSequence(self)) #QIntValidator doesn't accept digit sequences greater than 2^31 or something like that so we have to use a custom validator
self.responseButtonSizer.addWidget(self.dialerResponseField, 4, 0, 1, 3)
self.dialerResponseField.returnPressed.connect(self.enterButtonPressed)
self.dialerResponseField.setSizePolicy(sizePolicy)
self.dialerResponseField.setStyleSheet("font-size: 40px")
else:
self.statusButton.setMaximumSize(screen.width(), screen.height())
self.responseLight.setMaximumSize(screen.width(), screen.height())
if self.parent().currParadigm in ["Transformed Up-Down", #add translation
"Transformed Up-Down Limited",
"Transformed Up-Down Hybrid",
"Weighted Up-Down",
"Weighted Up-Down Limited",
"Weighted Up-Down Hybrid",
"Constant m-Intervals n-Alternatives",
"Transformed Up-Down Interleaved",
"Weighted Up-Down Interleaved",
"Multiple Constants m-Intervals n-Alternatives",
"PEST",
"Maximum Likelihood",
"PSI",
"UML"]:
if self.prm["preTrialInterval"] == True:
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
for i in range(nIntervals):
if self.prm["precursorInterval"] == True:
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
if self.prm["postcursorInterval"] == True:
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
r = 0
if self.prm["warningInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
if self.prm["preTrialInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
if nAlternatives == nIntervals:
for i in range(nAlternatives):
if self.prm["precursorInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
self.responseButton.append(QPushButton(str(i+1), self))
self.responseButtonSizer.addWidget(self.responseButton[i], 1, r)
self.responseButton[i].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#self.responseButton[i].setProperty("responseBoxButton", True)
self.responseButton[i].setFont(self.responseBoxButtonFont)
r = r+1
if self.prm[self.parent().currExp]["hasPostcursorInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
self.responseButton[i].clicked.connect(self.sortResponseButton)
self.responseButton[i].setFocusPolicy(Qt.NoFocus)
elif nAlternatives == nIntervals-1:
for i in range(nAlternatives):
if self.prm[self.parent().currExp]["hasPrecursorInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
if i == 0:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
self.responseButton.append(QPushButton(str(i+1), self))
self.responseButtonSizer.addWidget(self.responseButton[i], 1, r)
self.responseButton[i].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#self.responseButton[i].setProperty("responseBoxButton", True)
self.responseButton[i].setFont(self.responseBoxButtonFont)
r = r+1
self.responseButton[i].clicked.connect(self.sortResponseButton)
self.responseButton[i].setFocusPolicy(Qt.NoFocus)
if self.prm[self.parent().currExp]["hasPostcursorInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
elif self.parent().currParadigm in ["Constant 1-Interval 2-Alternatives",
"Multiple Constants 1-Interval 2-Alternatives",
"Constant 1-Pair Same/Different",
"Multiple Constants 1-Pair Same/Different",
"Constant ABX",
"Multiple Constants ABX",
"UML - Est. Guess Rate",
"PSI - Est. Guess Rate"]:
for i in range(nIntervals):
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
for i in range(self.prm['nAlternatives']):
self.responseButton.append(QPushButton(self.prm[self.tr(self.parent().experimentChooser.currentText())]['buttonLabels'][i], self))
self.responseButtonSizer.addWidget(self.responseButton[i], 1, i)
self.responseButton[i].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#self.responseButton[i].setProperty("responseBoxButton", True)
self.responseButton[i].setFont(self.responseBoxButtonFont)
self.responseButton[i].clicked.connect(self.sortResponseButton)
self.responseButton[i].setFocusPolicy(Qt.NoFocus)
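            # Note: the ABX paradigms are also listed in the branch above, so this
            # elif appears to be unreachable in the code as shown.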
elif self.parent().currParadigm in ["Constant ABX", "Multiple Constants ABX"]:
for i in range(3):
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
for i in range(2):
self.responseButton.append(QPushButton(self.prm[self.tr(self.parent().experimentChooser.currentText())]['buttonLabels'][i], self))
self.responseButtonSizer.addWidget(self.responseButton[i], 1, i)
self.responseButton[i].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#self.responseButton[i].setProperty("responseBoxButton", True)
self.responseButton[i].setFont(self.responseBoxButtonFont)
self.responseButton[i].clicked.connect(self.sortResponseButton)
self.responseButton[i].setFocusPolicy(Qt.NoFocus)
elif self.parent().currParadigm in ["Multiple Constants Odd One Out", "Multiple Constants Sound Comparison"]:
for i in range(nIntervals):
self.intervalLight.append(intervalLight(self))
self.intervalSizer.addWidget(self.intervalLight[n], 0, n)
n = n+1
r = 0
if self.prm["warningInterval"] == True:
self.responseButtonSizer.addItem(QSpacerItem(-1, -1, QSizePolicy.Expanding), 0, r)
r = r+1
for i in range(self.prm['nAlternatives']):
self.responseButton.append(QPushButton(str(i+1), self))
self.responseButtonSizer.addWidget(self.responseButton[i], 1, i+r)
self.responseButton[i].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#self.responseButton[i].setProperty("responseBoxButton", True)
self.responseButton[i].setFont(self.responseBoxButtonFont)
self.responseButton[i].clicked.connect(self.sortResponseButton)
self.responseButton[i].setFocusPolicy(Qt.NoFocus)
self.showHideIntervalLights(self.prm['intervalLights'])
def showHideIntervalLights(self, status):
if status == self.tr("Yes"):
for light in self.intervalLight:
light.show()
else:
for light in self.intervalLight:
light.hide()
def onToggleControlWin(self):
if self.toggleControlWin.isChecked() == True:
self.parent().show()
elif self.toggleControlWin.isChecked() == False:
self.parent().hide()
if self.prm['storedBlocks'] > 0:
if self.parent().listenerTF.text() == "" and self.prm['pref']['general']['listenerNameWarn'] == True:
msg = self.prm['rbTrans'].translate('rb', "Please, enter the listener's name:")
text, ok = QInputDialog.getText(self, self.prm['rbTrans'].translate('rb', "Input Dialog:") , msg)
if ok:
self.parent().listenerTF.setText(text)
self.prm['listener'] = text
if self.parent().sessionLabelTF.text() == "" and self.prm['pref']['general']['sessionLabelWarn'] == True:
msg = self.prm['rbTrans'].translate('rb', "Please, enter the session label:")
text, ok = QInputDialog.getText(self, self.prm['rbTrans'].translate('rb', "Input Dialog:") , msg)
if ok:
self.parent().sessionLabelTF.setText(text)
self.prm['sessionLabel'] = text
if 'resultsFile' not in self.prm:
self.onAskSaveResultsButton()
def onClickShowInstructions(self):
dialog = showInstructionsDialog(self)
def onAskSaveResultsButton(self):
ftow = QFileDialog.getSaveFileName(self, self.tr('Choose file to write results'), "", self.tr('All Files (*)'), "", QFileDialog.DontConfirmOverwrite)[0]
if os.path.exists(ftow) == False and len(ftow) > 0:
fName = open(ftow, 'w')
fName.write('')
fName.close()
if len(ftow) > 0:
if fnmatch.fnmatch(ftow, '*.txt') == False:
ftow = ftow + '.txt'
self.prm['resultsFile'] = ftow
self.parent().statusBar().showMessage(self.tr('Saving results to file: ') + self.prm["resultsFile"])
def onToggleGauge(self):
if self.toggleGauge.isChecked() == True:
self.gauge.show()
elif self.toggleGauge.isChecked() == False:
self.gauge.hide()
def onToggleBlockGauge(self):
if self.toggleBlockGauge.isChecked() == True:
self.blockGauge.show()
elif self.toggleBlockGauge.isChecked() == False:
self.blockGauge.hide()
def onClickStatusButton(self):
#print(self.statusButton.text())
if self.prm['storedBlocks'] == 0 or self.statusButton.text() in [self.prm['rbTrans'].translate("rb", "Running"), "&"+self.prm['rbTrans'].translate("rb", "Running")] or self.statusButton.text() in [self.prm['rbTrans'].translate("rb", "Finished"), "&" + self.prm['rbTrans'].translate("rb", "Finished")]:
return
self.parent().compareGuiStoredParameters()
if self.prm['currentBlock'] > self.prm['storedBlocks']: #the user did not choose to store the unsaved block, move to first block
self.parent().moveToBlockPosition(1)
if self.parent().listenerTF.text() == "" and self.prm['pref']['general']['listenerNameWarn'] == True:
msg = self.prm['rbTrans'].translate('rb', "Please, enter the listener's name:")
text, ok = QInputDialog.getText(self, self.prm['rbTrans'].translate('rb', "Input Dialog:") , msg)
if ok:
self.parent().listenerTF.setText(text)
self.prm['listener'] = text
return
if self.parent().sessionLabelTF.text() == "" and self.prm['pref']['general']['sessionLabelWarn'] == True:
msg = self.prm['rbTrans'].translate('rb', "Please, enter the session label:")
text, ok = QInputDialog.getText(self, self.prm['rbTrans'].translate('rb', "Input Dialog:") , msg)
if ok:
self.parent().sessionLabelTF.setText(text)
self.prm['sessionLabel'] = text
return
if int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition']) == 1 and self.prm['allBlocks']['shuffleMode'] == self.tr("Ask") and self.prm["shuffled"] == False and self.prm['storedBlocks'] > 1 :
reply = QMessageBox.question(self, self.prm['rbTrans'].translate('rb', "Message"),
self.prm['rbTrans'].translate('rb', "Do you want to shuffle the blocks?"), QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.parent().onClickShuffleBlocksButton()
self.prm["shuffled"] = True
elif int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition']) == 1 and self.prm["shuffled"] == False and self.prm['allBlocks']['shuffleMode'] == self.tr("Auto") and self.prm['storedBlocks'] > 1 :
self.parent().onClickShuffleBlocksButton()
self.prm["shuffled"] = True
#self.prm[currBlock]['blockPosition']
if int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition']) in self.prm["allBlocks"]["instructionsAt"]:
instrClosed = False
while instrClosed == False:
dialog = showInstructionsDialog(self)
if dialog.exec_():
instrClosed = True
else:
instrClosed = True
time.sleep(1.5)
self.prm['startOfBlock'] = True
self.statusButton.setText(self.prm['rbTrans'].translate("rb", "Running"))
self.prm['trialRunning'] = True
QApplication.processEvents()
if self.prm['allBlocks']['sendTriggers'] == True:
thisSnd = pureTone(440, 0, -200, 980, 10, "Both", self.prm['allBlocks']['sampRate'], 100)
#playCmd = self.prm['pref']['sound']['playCommand']
self.audioManager.playSoundWithTrigger(thisSnd, self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], False, 'ONTrigger.wav', self.prm["pref"]["general"]["ONTrigger"])
print("SENDING START TRIGGER", self.prm["pref"]["general"]["ONTrigger"])
if self.prm['currentBlock'] > self.prm['storedBlocks']:
self.parent().onClickNextBlockPositionButton()
self.doTrial()
def playRandomisedIntervals(self, stimulusCorrect, stimulusIncorrect, preTrialStim=None, precursorStim=None, postCursorStim=None):
# this randint function comes from numpy and has different behaviour than in the python 'random' module
# Return random integers x such that low <= x < high
currBlock = 'b'+ str(self.prm['currentBlock'])
try:
nAlternatives = self.prm[currBlock]['nAlternatives']
nIntervals = self.prm[currBlock]['nIntervals']
except: #this should work for paradigms that don't have the alternatives chooser, hence have a fixed number of response alternatives
nIntervals = self.prm[self.parent().currExp]['defaultNIntervals']
nAlternatives = self.prm[self.parent().currExp]['defaultNAlternatives']
#cmd = self.prm['pref']['sound']['playCommand']
if nAlternatives == nIntervals:
self.correctInterval = numpy.random.randint(0, nIntervals)
self.correctButton = self.correctInterval + 1
elif nAlternatives == nIntervals-1:
self.correctInterval = numpy.random.randint(1, nIntervals)
self.correctButton = self.correctInterval
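        # Note: when nAlternatives == nIntervals the target can occur in any
        # observation interval and the button index matches the interval; when
        # nAlternatives == nIntervals - 1 the first interval presumably acts as
        # a reference/standard, so the target is drawn from the remaining
        # intervals and button 1 corresponds to the second presented interval.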
soundList = []
for i in range(nIntervals):
if i == self.correctInterval:
soundList.append(stimulusCorrect)
else:
foo = stimulusIncorrect.pop()
soundList.append(foo)
nLight = 0
if self.prm["warningInterval"] == True:
self.intervalLight[nLight].setStatus('on')
time.sleep(self.prm[currBlock]['warningIntervalDur']/1000)
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
time.sleep(self.prm[currBlock]['warningIntervalISI']/1000)
if self.prm["preTrialInterval"] == True:
self.intervalLight[nLight].setStatus('on')
self.audioManager.playSound(preTrialStim, self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'pre-trial_interval' +'.wav')
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
time.sleep(self.prm[currBlock]['preTrialIntervalISI']/1000)
for i in range(nIntervals):
if self.prm["precursorInterval"] == True:
self.intervalLight[nLight].setStatus('on')
self.audioManager.playSound(precursorStim, self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'precursor_interval'+str(i+1) +'.wav')
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
time.sleep(self.prm[currBlock]['precursorIntervalISI']/1000)
self.intervalLight[nLight].setStatus('on')
self.audioManager.playSound(soundList[i], self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'interval'+str(i+1) +'.wav')
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
if self.prm["postcursorInterval"] == True:
self.intervalLight[nLight].setStatus('on')
                self.audioManager.playSound(postCursorStim, self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'postcursor_interval'+str(i+1) +'.wav')
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
time.sleep(self.prm[currBlock]['postcursorIntervalISI']/1000)
if i < nIntervals-1:
time.sleep(self.prm['isi']/1000.)
def playSequentialIntervals(self, sndList, ISIList=[], trigNum=None):
currBlock = 'b'+ str(self.prm['currentBlock'])
#cmd = self.prm['pref']['sound']['playCommand']
for i in range(len(sndList)):
if self.prm['pref']['sound']['writeSndSeqSegments'] == True:
self.audioManager.scipy_wavwrite("sndSeq%i.wav"%(i+1), self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], sndList[i])
nLight = 0
if self.prm["warningInterval"] == True:
self.intervalLight[nLight].setStatus('on')
time.sleep(self.prm[currBlock]['warningIntervalDur']/1000)
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
time.sleep(self.prm[currBlock]['warningIntervalISI']/1000)
for i in range(len(sndList)):
self.intervalLight[nLight].setStatus('on')
if trigNum != None:
self.audioManager.playSoundWithTrigger(sndList[i], self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'soundSequence.wav', trigNum)
else:
self.audioManager.playSound(sndList[i], self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'soundSequence.wav')
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
if i < (len(sndList) - 1):
time.sleep(ISIList[i]/1000)
return
def playSequentialIntervalsNoLights(self, sndList, ISIList=[], trigNum=None):
currBlock = 'b'+ str(self.prm['currentBlock'])
#self.dialerResponseField.setReadOnly(True)
for i in range(len(sndList)):
if self.prm['pref']['sound']['writeSndSeqSegments'] == True:
self.audioManager.scipy_wavwrite("sndSeq%i.wav"%(i+1), self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], sndList[i])
for i in range(len(sndList)):
            if trigNum is not None:
self.audioManager.playSoundWithTrigger(sndList[i], self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'soundSequence.wav', trigNum)
else:
self.audioManager.playSound(sndList[i], self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'soundSequence.wav')
if i < (len(sndList) - 1):
time.sleep(ISIList[i]/1000)
#self.dialerResponseField.setReadOnly(False)
#self.dialerResponseField.setText("")
#QApplication.processEvents()
return
def playSoundsWavComp(self, soundList):
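        #play the sounds for a WAV-comparison trial: an optional warning interval
        #followed by nIntervals sounds separated by the inter-stimulus interval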
currBlock = 'b'+ str(self.prm['currentBlock'])
nIntervals = self.prm['nIntervals']
# numpy.random.shuffle(parent.prm['currStimOrder'])
# parent.correctButton = parent.prm['currStimOrder'].index(2)+1
nLight = 0
if self.prm["warningInterval"] == True:
self.intervalLight[nLight].setStatus('on')
time.sleep(self.prm[currBlock]['warningIntervalDur']/1000)
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
time.sleep(self.prm[currBlock]['warningIntervalISI']/1000)
for i in range(nIntervals):
self.intervalLight[nLight].setStatus('on')
self.audioManager.playSound(soundList[i], self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['writewav'], 'interval'+str(i+1) +'.wav')
self.intervalLight[nLight].setStatus('off')
nLight = nLight+1
if i < nIntervals-1:
time.sleep(self.prm['isi']/1000)
def doTrial(self):
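        #set up and run a single trial: copy the block/paradigm settings into
        #self.prm, prepare the results file on the first trial of a block, call
        #the experiment-specific doTrial_* function, and, in automatic or
        #psychometric response mode, generate a simulated response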
self.prm['trialRunning'] = True
self.prm['sortingResponse'] = False
currBlock = 'b'+ str(self.prm['currentBlock'])
        #copied here for backward compatibility; otherwise every experiment would need to be updated
self.prm['maxLevel'] = self.prm['allBlocks']['maxLevel']
self.prm['sampRate'] = self.prm['allBlocks']['sampRate']
self.prm['nBits'] = self.prm['allBlocks']['nBits']
self.prm['paradigm'] = self.prm[currBlock]['paradigm']
if self.prm[self.parent().currExp]["hasISIBox"] == True:
self.prm['isi'] = self.prm[currBlock]['ISIVal']
if self.prm[self.parent().currExp]["hasAlternativesChooser"] == True:
self.prm['nAlternatives'] = self.prm[currBlock]['nAlternatives']
if self.prm[self.parent().currExp]["hasAltReps"] == True:
self.prm['altReps'] = self.prm[currBlock]['altReps']
self.prm['altRepsISI'] = self.prm[currBlock]['altRepsISI']
else:
self.prm['altReps'] = 0
self.prm["responseLight"] = self.prm[currBlock]['responseLight']
self.prm["responseLightType"] = self.prm[currBlock]['responseLightType']
if self.prm['startOfBlock'] == True:
self.getStartTime()
#clear these variables
self.prm['additional_parameters_to_write'] = {}
self.prm['additional_parameters_to_write_labels'] = []
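        #the blocks below copy the paradigm-specific settings from the
        #paradigmChooser/paradigmField storage lists (looked up by their labels)
        #into top-level self.prm entries for use during the trial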
if self.prm['paradigm'] in [self.tr("Transformed Up-Down Interleaved"),
self.tr("Weighted Up-Down Interleaved")]:
self.prm['nDifferences'] = int(self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("No. Tracks:"))])
if self.prm['nDifferences'] == 1:
self.prm['maxConsecutiveTrials'] = self.tr('unlimited')
else:
self.prm['maxConsecutiveTrials'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Max. Consecutive Trials x Track:"))]
if self.prm['paradigm'] in [self.tr("Transformed Up-Down"), self.tr("Transformed Up-Down Limited")]:
self.prm['numberCorrectNeeded'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Down"))])
self.prm['numberIncorrectNeeded'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Up"))])
self.prm['initialTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Turnpoints"))])
self.prm['totalTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Total Turnpoints"))])
self.prm['adaptiveStepSize1'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 1"))]
self.prm['adaptiveStepSize2'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 2"))]
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['corrTrackDir'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track:"))]
elif self.prm['paradigm'] == self.tr("Transformed Up-Down Interleaved"):
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['turnpointsToAverage'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Turnpoints to average:"))]
self.prm['numberCorrectNeeded'] = []
self.prm['numberIncorrectNeeded'] = []
self.prm['initialTurnpoints'] = []
self.prm['totalTurnpoints'] = []
self.prm['adaptiveStepSize1'] = []
self.prm['adaptiveStepSize2'] = []
self.prm['consecutiveTrialsCounter'] = []
self.prm['corrTrackDir'] = []
for i in range(self.prm['nDifferences']):
self.prm['numberCorrectNeeded'].append(int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Down Track " + str(i+1)))]))
self.prm['numberIncorrectNeeded'].append(int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Up Track " + str(i+1)))]))
self.prm['initialTurnpoints'].append(int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Turnpoints Track " + str(i+1)))]))
self.prm['totalTurnpoints'].append(int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Total Turnpoints Track " + str(i+1)))]))
self.prm['adaptiveStepSize1'].append(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 1 Track " + str(i+1)))])
self.prm['adaptiveStepSize2'].append(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 2 Track " + str(i+1)))])
self.prm['consecutiveTrialsCounter'].append(0)
self.prm['corrTrackDir'].append(self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track {0}:".format(str(i+1))))])
elif self.prm['paradigm'] in [self.tr("Transformed Up-Down Hybrid")]:
self.prm['numberCorrectNeeded'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Down"))])
self.prm['numberIncorrectNeeded'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Up"))])
self.prm['initialTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Turnpoints"))])
self.prm['totalTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Total Turnpoints"))])
self.prm['adaptiveStepSize1'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 1"))]
self.prm['adaptiveStepSize2'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 2"))]
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['corrTrackDir'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track:"))]
self.prm['nTrialsRequiredAtMaxLimit'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Constant No. Trials"))]
#self.prm['minSwitchTrials'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Min. No. trials before switch"))]
#self.prm['adaptiveMaxLimit'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Adapt. Param. Limit"))]
self.prm['switchAfterInitialTurnpoints'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Switch only after initial turnpoints:"))]
elif self.prm['paradigm'] in [self.tr("Weighted Up-Down"), self.tr("Weighted Up-Down Limited")]:
self.prm['percentCorrectTracked'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Percent Correct Tracked"))])
self.prm['initialTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Turnpoints"))])
self.prm['totalTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Total Turnpoints"))])
self.prm['adaptiveStepSize1'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 1"))]
self.prm['adaptiveStepSize2'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 2"))]
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['corrTrackDir'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track:"))]
self.prm['numberCorrectNeeded'] = 1
self.prm['numberIncorrectNeeded'] = 1
elif self.prm['paradigm'] == self.tr("Weighted Up-Down Interleaved"):
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['turnpointsToAverage'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Turnpoints to average:"))]
self.prm['percentCorrectTracked'] = []
self.prm['numberCorrectNeeded'] = []
self.prm['numberIncorrectNeeded'] = []
self.prm['initialTurnpoints'] = []
self.prm['totalTurnpoints'] = []
self.prm['adaptiveStepSize1'] = []
self.prm['adaptiveStepSize2'] = []
self.prm['consecutiveTrialsCounter'] = []
self.prm['corrTrackDir'] = []
for i in range(self.prm['nDifferences']):
self.prm['numberCorrectNeeded'].append(1)
self.prm['numberIncorrectNeeded'].append(1)
self.prm['percentCorrectTracked'].append(float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Percent Correct Tracked Track " + str(i+1)))]))
self.prm['initialTurnpoints'].append(int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Turnpoints Track " + str(i+1)))]))
self.prm['totalTurnpoints'].append(int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Total Turnpoints Track " + str(i+1)))]))
self.prm['adaptiveStepSize1'].append(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 1 Track " + str(i+1)))])
self.prm['adaptiveStepSize2'].append(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 2 Track " + str(i+1)))])
self.prm['consecutiveTrialsCounter'].append(0)
self.prm['corrTrackDir'].append(self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track {0}:".format(str(i+1))))])
elif self.prm['paradigm'] in [self.tr("Weighted Up-Down Hybrid")]:
self.prm['percentCorrectTracked'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Percent Correct Tracked"))])
self.prm['initialTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Turnpoints"))])
self.prm['totalTurnpoints'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Total Turnpoints"))])
self.prm['adaptiveStepSize1'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 1"))]
self.prm['adaptiveStepSize2'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Step Size 2"))]
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['corrTrackDir'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track:"))]
self.prm['numberCorrectNeeded'] = 1
self.prm['numberIncorrectNeeded'] = 1
self.prm['nTrialsRequiredAtMaxLimit'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Constant No. Trials"))]
#self.prm['minSwitchTrials'] = self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Min. No. trials before switch"))]
self.prm['switchAfterInitialTurnpoints'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Switch only after initial turnpoints:"))]
elif self.prm['paradigm'] in [self.tr("Constant m-Intervals n-Alternatives"),
self.tr("Constant 1-Interval 2-Alternatives"),
self.tr("Constant 1-Pair Same/Different")]:
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
self.prm['nPracticeTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Practice Trials"))])
elif self.prm['paradigm'] in [self.tr("Multiple Constants 1-Interval 2-Alternatives"),
self.tr("Multiple Constants m-Intervals n-Alternatives"),
self.tr("Multiple Constants 1-Pair Same/Different"),
self.tr("Multiple Constants ABX"),
self.tr("Multiple Constants Odd One Out"),
self.tr("Multiple Constants Sound Comparison")]:
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
self.prm['nPracticeTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Practice Trials"))])
self.prm['nDifferences'] = int(self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("No. Differences:"))])
if self.prm['startOfBlock'] == True:
self.prm['currentDifference'] = numpy.random.randint(self.prm['nDifferences'])
elif self.prm['paradigm'] == self.tr("PEST"):
self.prm['corrTrackDir'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Corr. Resp. Move Track:"))]
self.prm['adaptiveType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Procedure:"))]
self.prm['initialStepSize'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Initial Step Size"))])
self.prm['minStepSize'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Minimum Step Size"))])
self.prm['maxStepSize'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Maximum Step Size"))])
self.prm['percentCorrectTracked'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Percent Correct Tracked"))])
self.prm['W'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("W"))])
elif self.prm["paradigm"] == self.tr("Maximum Likelihood"):
self.prm['psyFunType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Psychometric Function:"))]
self.prm['psyFunLogScale'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Log scale:"))]
self.prm['psyFunLoMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Minimum"))])
self.prm['psyFunHiMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Maximum"))])
self.prm['psyFunMidPointStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Step"))])
self.prm['percentCorrectTracked'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Percent Correct Tracked"))])
self.prm['psyFunSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Psychometric Function Slope"))])
self.prm['psyFunLapseRate'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Rate"))])
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
elif self.prm["paradigm"] == self.tr("PSI"):
self.prm['psyFunType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Psychometric Function:"))]
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
self.prm['stimScale'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Stim. Scaling:"))]
self.prm['stimLo'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Min"))])
self.prm['stimHi'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Max"))])
self.prm['stimStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Step"))])
self.prm['loMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Min"))])
self.prm['hiMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Max"))])
self.prm['midPointStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Step"))])
self.prm['midPointPrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Mid Point Prior:"))]
self.prm['midPointPriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point mu"))])
self.prm['midPointPriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point STD"))])
self.prm['loSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Min"))])
self.prm['hiSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Max"))])
self.prm['slopeStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Step"))])
self.prm['slopeSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Spacing:"))]
self.prm['slopePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Prior:"))]
self.prm['slopePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope mu"))])
self.prm['slopePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope STD"))])
self.prm['loLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Min"))])
self.prm['hiLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Max"))])
self.prm['lapseStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Step"))])
self.prm['lapseSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Spacing:"))]
self.prm['lapsePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Prior:"))]
self.prm['lapsePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse mu"))])
self.prm['lapsePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse STD"))])
self.prm['margLapse'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Lapse:"))]
self.prm['margSlope'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Slope:"))]
self.prm['margThresh'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Mid Point:"))]
self.prm['startLevelType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Start Level:"))]
elif self.prm["paradigm"] == self.tr("PSI - Est. Guess Rate"):
self.prm['psyFunType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Psychometric Function:"))]
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
self.prm['stimScale'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Stim. Scaling:"))]
self.prm['stimLo'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Min"))])
self.prm['stimHi'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Max"))])
self.prm['stimStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Step"))])
self.prm['loMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Min"))])
self.prm['hiMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Max"))])
self.prm['midPointStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Step"))])
self.prm['midPointPrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Mid Point Prior:"))]
self.prm['midPointPriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point mu"))])
self.prm['midPointPriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point STD"))])
self.prm['loGuess'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess Min"))])
self.prm['hiGuess'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess Max"))])
self.prm['guessStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess Step"))])
self.prm['guessSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Guess Spacing:"))]
self.prm['guessPrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Guess Prior:"))]
self.prm['guessPriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess mu"))])
self.prm['guessPriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess STD"))])
self.prm['loSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Min"))])
self.prm['hiSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Max"))])
self.prm['slopeStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Step"))])
self.prm['slopeSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Spacing:"))]
self.prm['slopePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Prior:"))]
self.prm['slopePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope mu"))])
self.prm['slopePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope STD"))])
self.prm['loLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Min"))])
self.prm['hiLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Max"))])
self.prm['lapseStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Step"))])
self.prm['lapseSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Spacing:"))]
self.prm['lapsePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Prior:"))]
self.prm['lapsePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse mu"))])
self.prm['lapsePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse STD"))])
self.prm['margGuess'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Guess:"))]
self.prm['margLapse'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Lapse:"))]
self.prm['margSlope'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Slope:"))]
self.prm['margThresh'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Marginalize Mid Point:"))]
self.prm['startLevelType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Start Level:"))]
elif self.prm["paradigm"] == self.tr("UML"):
self.prm['psyFunType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Psychometric Function:"))]
self.prm['swptRule'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Swpt. Rule:"))]
self.prm['psyFunPosteriorSummary'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Posterior Summary:"))]
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
self.prm['numberCorrectNeeded'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Down"))])
self.prm['stimScale'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Stim. Scaling:"))]
self.prm['stimLo'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Min"))])
self.prm['stimHi'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Max"))])
self.prm['suggestedLambdaSwpt'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Suggested Lapse Swpt."))])
self.prm['lambdaSwptPC'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Pr. Corr. at Est. Lapse Swpt."))])
self.prm['loMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Min"))])
self.prm['hiMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Max"))])
self.prm['midPointStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Step"))])
self.prm['midPointPrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Mid Point Prior:"))]
self.prm['midPointPriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point mu"))])
self.prm['midPointPriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point STD"))])
self.prm['loSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Min"))])
self.prm['hiSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Max"))])
self.prm['slopeStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Step"))])
self.prm['slopeSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Spacing:"))]
self.prm['slopePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Prior:"))]
self.prm['slopePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope mu"))])
self.prm['slopePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope STD"))])
self.prm['loLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Min"))])
self.prm['hiLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Max"))])
self.prm['lapseStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Step"))])
self.prm['lapseSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Spacing:"))]
self.prm['lapsePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Prior:"))]
self.prm['lapsePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse mu"))])
self.prm['lapsePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse STD"))])
if self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Load UML state from prev. blocks:"))] == "Yes":
self.prm["saveUMLState"] = True
else:
self.prm["saveUMLState"] = False
elif self.prm["paradigm"] == self.tr("UML - Est. Guess Rate"):
self.prm['psyFunType'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Psychometric Function:"))]
self.prm['swptRule'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Swpt. Rule:"))]
self.prm['psyFunPosteriorSummary'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Posterior Summary:"))]
self.prm['nTrials'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("No. Trials"))])
self.prm['numberCorrectNeeded'] = int(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Rule Down"))])
self.prm['stimScale'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Stim. Scaling:"))]
self.prm['stimLo'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Min"))])
self.prm['stimHi'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Stim. Max"))])
self.prm['suggestedLambdaSwpt'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Suggested Lapse Swpt."))])
self.prm['lambdaSwptPC'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Pr. Corr. at Est. Lapse Swpt."))])
self.prm['loMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Min"))])
self.prm['hiMidPoint'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Max"))])
self.prm['midPointStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point Step"))])
self.prm['midPointPrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Mid Point Prior:"))]
self.prm['midPointPriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point mu"))])
self.prm['midPointPriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Mid Point STD"))])
self.prm['loGuess'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess Min"))])
self.prm['hiGuess'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess Max"))])
self.prm['guessStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess Step"))])
self.prm['guessSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Guess Spacing:"))]
self.prm['guessPrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Guess Prior:"))]
self.prm['guessPriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess mu"))])
self.prm['guessPriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Guess STD"))])
self.prm['loSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Min"))])
self.prm['hiSlope'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Max"))])
self.prm['slopeStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope Step"))])
self.prm['slopeSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Spacing:"))]
self.prm['slopePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Slope Prior:"))]
self.prm['slopePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope mu"))])
self.prm['slopePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Slope STD"))])
self.prm['loLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Min"))])
self.prm['hiLapse'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Max"))])
self.prm['lapseStep'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse Step"))])
self.prm['lapseSpacing'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Spacing:"))]
self.prm['lapsePrior'] = self.prm[currBlock]['paradigmChooser'][self.prm[currBlock]['paradigmChooserLabel'].index(self.tr("Lapse Prior:"))]
self.prm['lapsePriorMu'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse mu"))])
self.prm['lapsePriorSTD'] = float(self.prm[currBlock]['paradigmField'][self.prm[currBlock]['paradigmFieldLabel'].index(self.tr("Lapse STD"))])
if self.prm['startOfBlock'] == True and 'resultsFile' not in self.prm:
if self.prm['pref']['general']['resFileFormat'] == 'fixed':
self.prm['resultsFile'] = self.prm['pref']['general']['resFileFixedString']
resFileToOpen = copy.copy(self.prm['pref']['general']['resFileFixedString'])
                #"touch" the results file so that it exists even before any results are written
                with open(resFileToOpen, 'a') as fHandle:
                    fHandle.write('')
elif self.prm['pref']['general']['resFileFormat'] == 'variable':
self.prm['resultsFile'] = self.prm['listener'] + '_' + time.strftime("%y-%m-%d_%H-%M-%S", time.localtime())
if self.prm['paradigm'] in [self.tr("Transformed Up-Down Interleaved"),
self.tr("Weighted Up-Down Interleaved")]:
if self.prm['maxConsecutiveTrials'] == self.tr('unlimited'):
self.prm['currentDifference'] = numpy.random.randint(self.prm['nDifferences'])
elif max(self.prm['consecutiveTrialsCounter']) < int(self.prm['maxConsecutiveTrials']):
self.prm['currentDifference'] = numpy.random.randint(self.prm['nDifferences'])
else:
choices = list(range(self.prm['nDifferences']))
choices.pop(self.prm['consecutiveTrialsCounter'].index(max(self.prm['consecutiveTrialsCounter'])))
self.prm['currentDifference'] = random.choice(choices)
for i in range(self.prm['nDifferences']):
if i == self.prm['currentDifference']:
self.prm['consecutiveTrialsCounter'][self.prm['currentDifference']] = self.prm['consecutiveTrialsCounter'][self.prm['currentDifference']] + 1
else:
self.prm['consecutiveTrialsCounter'][i] = 0
currExp = self.tr(self.prm[currBlock]['experiment'])
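        #placeholder tokens, presumably substituted elsewhere in user-defined
        #commands/strings; each token in self.pychovariables maps positionally to
        #the corresponding value in self.pychovariablesSubstitute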
self.pychovariables = ["[resDir]",
"[resFile]",
"[resFileTrial]",
"[resFileSess]",
"[resTable]",
"[resTableTrial]",
"[resTableSess]",
"[pdfPlot]",
"[listener]",
"[experimenter]"]
self.pychovariablesSubstitute = [os.path.dirname(self.prm['resultsFile']),
self.prm['resultsFile'],
self.prm['resultsFile'].split('.txt')[0]+ self.prm["pref"]["general"]["fullFileSuffix"],
self.prm['resultsFile'].split('.txt')[0]+ self.prm["pref"]["general"]["sessSummResFileSuffix"],
self.prm['resultsFile'].split('.txt')[0]+'_table.csv',
self.prm['resultsFile'].split('.txt')[0]+'_table' + self.prm["pref"]["general"]["fullFileSuffix"]+'.csv',
self.prm['resultsFile'].split('.txt')[0]+'_table' + self.prm["pref"]["general"]["sessSummResFileSuffix"]+'.csv',
self.prm['resultsFile'].split('.txt')[0]+'_table' + self.prm["pref"]["general"]["sessSummResFileSuffix"]+'.pdf',
self.prm['listener'],
self.prm['allBlocks']['currentExperimenter']]
time.sleep(self.prm[currBlock]['preTrialSilence']/1000)
execString = self.prm[currExp]['execString']
        #look up the experiment module first among the default experiments and then
        #among the lab (custom) experiments; the latter takes precedence if both define it
        try:
            methodToCall1 = getattr(default_experiments, execString)
        except AttributeError:
            pass
        try:
            methodToCall1 = getattr(labexp, execString)
        except AttributeError:
            pass
methodToCall2 = getattr(methodToCall1, 'doTrial_'+ execString)
result = methodToCall2(self)
QApplication.processEvents()
self.prm['trialRunning'] = False
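        #in "Automatic" response mode a simulated listener responds correctly with
        #probability autoPCCorr, otherwise a random incorrect button is chosen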
if self.prm['allBlocks']['responseMode'] == self.tr("Automatic"):
resp = np.random.binomial(1, self.prm['allBlocks']['autoPCCorr'], 1)[0]
if resp == 1:
self.sortResponse(self.correctButton)
else:
self.sortResponse(random.choice(numpy.delete(numpy.arange(self.prm['nAlternatives'])+1, self.correctButton-1)))
if self.prm['allBlocks']['responseMode'] == self.tr("Psychometric"):
if self.prm['paradigm'] not in [self.tr("Transformed Up-Down"),
self.tr("Weighted Up-Down"),
self.tr("Transformed Up-Down Limited"),
self.tr("Transformed Up-Down Hybrid"),
self.tr("Weighted Up-Down Limited"),
self.tr("Weighted Up-Down Hybrid"),
self.tr("Transformed Up-Down Interleaved"),
self.tr("Weighted Up-Down Interleaved"),
self.tr("PEST"), self.tr("Maximum Likelihood"),
self.tr("PSI"),
self.tr("UML"),
self.tr("UML - Est. Guess Rate"),
self.tr("PSI - Est. Guess Rate")]:
ret = QMessageBox.warning(self, self.tr("Warning"),
self.tr("Sorry, psychometric listener not supported by current paradigm. Please, choose another response mode."),
QMessageBox.Ok)
return
self.prm['responseModeChoices'] = ["Real Listener", "Automatic", "Simulated Listener", "Psychometric"]
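            #compute the probability of a correct response from the simulated
            #psychometric listener's function (logistic, Gaussian, Gumbel, or Weibull),
            #on a linear or logarithmic stimulus axis, then draw the response from a
            #Bernoulli trial with that probability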
if self.prm[currBlock]['psyListFun'] == "Logistic":
if self.prm[currBlock]['psyListFunFit'] == "Linear":
probCorr = logisticPsy(self.prm['adaptiveParam'], self.prm[currBlock]['psyListMidpoint'],
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFunFit'] == "Logarithmic":
probCorr = logisticPsy(np.log(self.prm['adaptiveParam']), np.log(self.prm[currBlock]['psyListMidpoint']),
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFun'] == "Gaussian":
if self.prm[currBlock]['psyListFunFit'] == "Linear":
probCorr = gaussianPsy(self.prm['adaptiveParam'], self.prm[currBlock]['psyListMidpoint'],
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFunFit'] == "Logarithmic":
probCorr = gaussianPsy(np.log(self.prm['adaptiveParam']), np.log(self.prm[currBlock]['psyListMidpoint']),
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFun'] == "Gumbel":
if self.prm[currBlock]['psyListFunFit'] == "Linear":
probCorr = gumbelPsy(self.prm['adaptiveParam'], self.prm[currBlock]['psyListMidpoint'],
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFunFit'] == "Logarithmic":
probCorr = gumbelPsy(np.log(self.prm['adaptiveParam']), np.log(self.prm[currBlock]['psyListMidpoint']),
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFun'] == "Weibull":
if self.prm[currBlock]['psyListFunFit'] == "Linear":
probCorr = weibullPsy(self.prm['adaptiveParam'], self.prm[currBlock]['psyListMidpoint'],
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
elif self.prm[currBlock]['psyListFunFit'] == "Logarithmic":
probCorr = weibullPsy(np.log(self.prm['adaptiveParam']), np.log(self.prm[currBlock]['psyListMidpoint']),
self.prm[currBlock]['psyListSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm[currBlock]['psyListLapse'])
resp = np.random.binomial(1, probCorr, 1)[0]
if resp == 1:
self.sortResponse(self.correctButton)
else:
self.sortResponse(random.choice(numpy.delete(numpy.arange(self.prm['nAlternatives'])+1, self.correctButton-1)))
#==================================================================
def dialerButtonClicked(self):
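        #append the digit of the pressed button to the dialer response field
        #(used by the dialer-style interfaces, e.g. the Digit Triplets Test)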
if self.parent().currExp == self.tr("Digit Span") and self.prm['trialRunning'] == True:
return
buttonClicked = self.responseButton.index(self.sender())
currText = self.dialerResponseField.text()
newText = currText + str(buttonClicked)
if self.parent().currExp == self.tr("Digit Triplets Test"):
nDigits = len(newText)
if nDigits > 3:
newText = newText[0:3]
self.dialerResponseField.setText(newText)
def backspaceButtonPressed(self):
self.dialerResponseField.backspace()
def enterButtonPressed(self):
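        #validate the dialed response (length and, for the Digit Triplets Test,
        #no repeated digits), then clear the field and pass the response on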
currText = self.dialerResponseField.text()
if self.parent().currExp == self.tr("Digit Triplets Test"):
if len(currText) < 3:
return
else:
if currText[0] == currText[1] or currText[0] == currText[2] or currText[1] == currText[2]:
ret = QMessageBox.warning(self, self.tr("Warning"),
self.tr("Repeated digits are not allowed. Please, edit your response."),
QMessageBox.Ok)
return
if self.parent().currExp == self.tr("Digit Span"):
if len(currText) < len(str(self.correctButton)):
ret = QMessageBox.warning(self, self.tr("Warning"),
self.tr("Input sequence is shorter than correct sequence."),
QMessageBox.Ok)
return
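        #setting the field to a space and then to an empty string appears to be a
        #workaround to force the widget to clear/refresh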
self.dialerResponseField.setText(" ")
self.dialerResponseField.setText("")
self.sortResponse(int(currText))
def sortResponseButton(self):
        #the try/except is needed because while the interface is updating between blocks
        #the sender may be missing (participants sometimes press the response button repeatedly while the interface is changing)
        try:
            buttonClicked = self.responseButton.index(self.sender())+1
        except Exception:
            buttonClicked = 0
self.sortResponse(buttonClicked)
    def keyPressEvent(self, event):
        if event.type() == QEvent.KeyPress:
            #map the numeric keys 0-9 to the corresponding response button number;
            #any other key maps to 0, which sortResponse ignores
            keyMap = {Qt.Key_0: 0, Qt.Key_1: 1, Qt.Key_2: 2, Qt.Key_3: 3,
                      Qt.Key_4: 4, Qt.Key_5: 5, Qt.Key_6: 6, Qt.Key_7: 7,
                      Qt.Key_8: 8, Qt.Key_9: 9}
            buttonClicked = keyMap.get(event.key(), 0)
            self.sortResponse(buttonClicked)
        return
def sortResponse(self, buttonClicked):
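        #route the button press to the handler for the current paradigm, after
        #filtering out presses that should be ignored (block not running, trial
        #still playing, a response already being processed, etc.)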
currBlock = 'b'+ str(self.prm['currentBlock'])
if buttonClicked == 0: #0 is not a response option
return
if self.parent().currExp == self.tr("Digit Triplets Test"):
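            #single-digit values are ignored here; presumably only the full
            #three-digit response submitted via the enter button is sorted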
if buttonClicked < 10:
return
if self.statusButton.text() not in [self.prm['rbTrans'].translate("rb", "Running"), "&" + self.prm['rbTrans'].translate("rb", "Running")]:
return
elif self.parent().currExp == self.tr("Digit Span"):
if self.statusButton.text() not in [self.prm['rbTrans'].translate("rb", "Running"), "&" + self.prm['rbTrans'].translate("rb", "Running")]:
return
else:
            #1) do not accept responses outside the possible alternatives and
            #2) do not accept responses if the block is not running (e.g. waiting or finished)
            if buttonClicked > self.prm['nAlternatives'] or self.statusButton.text() not in [self.prm['rbTrans'].translate("rb", "Running"), "&"+ self.prm['rbTrans'].translate("rb", "Running")]:
                return
        #do not accept responses while a trial is still running
        #(the reason for the first condition is not documented in the original code)
        if buttonClicked < (self.prm['nAlternatives']+1) and self.prm['trialRunning'] == True:
            return
if self.prm['sortingResponse'] == True: #Do not accept other responses while processing the current one
return
self.prm['sortingResponse'] = True
if self.prm['paradigm'] == self.tr("Transformed Up-Down"):
self.sortResponseAdaptive(buttonClicked, 'transformedUpDown')
elif self.prm['paradigm'] == self.tr("Transformed Up-Down Interleaved"):
self.sortResponseAdaptiveInterleaved(buttonClicked, 'transformedUpDown')
elif self.prm['paradigm'] == self.tr("Transformed Up-Down Limited"):
self.sortResponseAdaptiveLimited(buttonClicked, 'transformedUpDown')
elif self.prm['paradigm'] == self.tr("Transformed Up-Down Hybrid"):
self.sortResponseAdaptiveHybrid(buttonClicked, 'transformedUpDown')
elif self.prm['paradigm'] == self.tr("Weighted Up-Down"):
self.sortResponseAdaptive(buttonClicked, 'weightedUpDown')
elif self.prm['paradigm'] == self.tr("Weighted Up-Down Interleaved"):
self.sortResponseAdaptiveInterleaved(buttonClicked, 'weightedUpDown')
elif self.prm['paradigm'] == self.tr("Weighted Up-Down Limited"):
self.sortResponseAdaptiveLimited(buttonClicked, 'weightedUpDown')
elif self.prm['paradigm'] == self.tr("Weighted Up-Down Hybrid"):
self.sortResponseAdaptiveHybrid(buttonClicked, 'weightedUpDown')
elif self.prm['paradigm'] == self.tr("Constant 1-Interval 2-Alternatives"):
self.sortResponseConstant1Interval2Alternatives(buttonClicked)
elif self.prm['paradigm'] == self.tr("Multiple Constants 1-Interval 2-Alternatives"):
self.sortResponseMultipleConstants1Interval2Alternatives(buttonClicked)
elif self.prm['paradigm'] == self.tr("Constant m-Intervals n-Alternatives"):
self.sortResponseConstantMIntervalsNAlternatives(buttonClicked)
elif self.prm['paradigm'] == self.tr("Multiple Constants m-Intervals n-Alternatives"):
self.sortResponseMultipleConstantsMIntervalsNAlternatives(buttonClicked)
elif self.prm['paradigm'] == self.tr("Constant 1-Pair Same/Different"):
self.sortResponseConstant1PairSameDifferent(buttonClicked)
elif self.prm['paradigm'] == self.tr("Multiple Constants 1-Pair Same/Different"):
self.sortResponseMultipleConstants1PairSameDifferent(buttonClicked)
elif self.prm['paradigm'] == self.tr("Multiple Constants ABX"):
self.sortResponseMultipleConstantsABX(buttonClicked)
elif self.prm['paradigm'] == self.tr("PEST"):
self.sortResponsePEST(buttonClicked)
elif self.prm['paradigm'] == self.tr("Maximum Likelihood"):
self.sortResponseMaximumLikelihood(buttonClicked)
elif self.prm['paradigm'] == self.tr("PSI"):
self.sortResponsePSI(buttonClicked)
elif self.prm['paradigm'] == self.tr("PSI - Est. Guess Rate"):
self.sortResponsePSIEstGuessRate(buttonClicked)
elif self.prm['paradigm'] == self.tr("UML"):
self.sortResponseUML(buttonClicked)
elif self.prm['paradigm'] == self.tr("UML - Est. Guess Rate"):
self.sortResponseUMLEstGuessRate(buttonClicked)
elif self.prm['paradigm'] == self.tr("Multiple Constants Odd One Out"):
self.sortResponseMultipleConstantsOddOneOut(buttonClicked)
elif self.prm['paradigm'] == self.tr("Multiple Constants Sound Comparison"):
self.sortResponseMultipleConstantsSoundComparison(buttonClicked)
elif self.prm['paradigm'] == self.tr("Adaptive Digit Span"):
self.sortResponseAdaptiveDigitSpan(buttonClicked)
self.prm['sortingResponse'] = False
def sortResponseAdaptive(self, buttonClicked, method):
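        #handle a response under the Transformed or Weighted Up-Down paradigms:
        #update the correct/incorrect counters, adjust the adaptive parameter by the
        #current step size, record turnpoints, and write out the results once the
        #required number of turnpoints has been collected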
if self.prm['startOfBlock'] == True:
self.prm['correctCount'] = 0
self.prm['incorrectCount'] = 0
self.prm['nTurnpoints'] = 0
self.prm['startOfBlock'] = False
self.prm['turnpointVal'] = []
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
if self.prm['corrTrackDir'] == self.tr("Down"):
self.prm['corrTrackSign'] = -1
self.prm['incorrTrackSign'] = 1
self.prm['incorrTrackDir'] = self.tr("Up")
else:
self.prm['corrTrackSign'] = 1
self.prm['incorrTrackSign'] = -1
self.prm['incorrTrackDir'] = self.tr("Down")
self.fullFileLines = []
self.fullFileSummLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
stepSize = {}
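        #choose the step sizes for the current stage of the track: step size 1 before
        #the initial turnpoints have been reached, step size 2 afterwards; for weighted
        #up-down tracks the step in the "incorrect" direction is scaled (arithmetically
        #or geometrically) so that the track converges on the tracked percent-correct point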
if method == 'transformedUpDown':
if self.prm['nTurnpoints'] < self.prm['initialTurnpoints']:
stepSize[self.tr("Down")] = self.prm['adaptiveStepSize1']
stepSize[self.tr("Up")] = self.prm['adaptiveStepSize1']
else:
stepSize[self.tr("Down")] = self.prm['adaptiveStepSize2']
stepSize[self.tr("Up")] = self.prm['adaptiveStepSize2']
elif method == 'weightedUpDown':
if self.prm['nTurnpoints'] < self.prm['initialTurnpoints']:
stepSize[self.prm['corrTrackDir']] = self.prm['adaptiveStepSize1']
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize1'] * (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize1'] ** (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
else:
stepSize[self.prm['corrTrackDir']] = self.prm['adaptiveStepSize2']
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize2'] * (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize2'] ** (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('1; ')
self.fullFileLines.append('1; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('1' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['correctCount'] = self.prm['correctCount'] + 1
self.prm['incorrectCount'] = 0
if self.prm['correctCount'] == self.prm['numberCorrectNeeded']:
self.prm['correctCount'] = 0
if self.prm['trackDir'] == self.prm['incorrTrackDir']:
self.prm['turnpointVal'].append(self.prm['adaptiveParam'])
self.prm['nTurnpoints'] = self.prm['nTurnpoints'] +1
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (stepSize[self.prm['corrTrackDir']]*self.prm['corrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (stepSize[self.prm['corrTrackDir']]**self.prm['corrTrackSign'])
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('0; ')
self.fullFileLines.append('0; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('0' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['incorrectCount'] = self.prm['incorrectCount'] + 1
self.prm['correctCount'] = 0
if self.prm['incorrectCount'] == self.prm['numberIncorrectNeeded']:
self.prm['incorrectCount'] = 0
if self.prm['trackDir'] == self.prm['corrTrackDir']:#self.tr('Down'):
self.prm['turnpointVal'].append(self.prm['adaptiveParam'])
self.prm['nTurnpoints'] = self.prm['nTurnpoints'] +1
self.prm['trackDir'] = copy.copy(self.prm['incorrTrackDir'])#self.tr('Up')
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (stepSize[self.prm['incorrTrackDir']]*self.prm['incorrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (stepSize[self.prm['incorrTrackDir']]**self.prm['incorrTrackSign'])
self.fullFileLog.flush()
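        #update the progress gauge: percentage of turnpoints completed in this block,
        #scaled by the block's position among the stored blocks and by the current repetition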
pcDone = (self.prm['nTurnpoints'] / self.prm['totalTurnpoints']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
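        #if the track is finished (all turnpoints collected), write out the full
        #trial-by-trial log and compute the threshold as the arithmetic or geometric
        #mean of the turnpoints collected after the initial turnpoints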
if self.prm['nTurnpoints'] == self.prm['totalTurnpoints']:
self.writeResultsHeader('standard')
#process results
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
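            # Write the turnpoint values to the results file; the '|' markers bracket the turnpoints collected
            # at the final step size, i.e. the ones that enter the threshold estimate below.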
for i in range(len(self.prm['turnpointVal'])):
if i == self.prm['initialTurnpoints']:
self.resFile.write('| ')
self.resFile.write('%5.2f ' %self.prm['turnpointVal'][i])
self.resFileLog.write('%5.2f ' %self.prm['turnpointVal'][i])
if i == self.prm['totalTurnpoints']-1:
self.resFile.write('| ')
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
finalTurnpoints = array(self.prm['turnpointVal'][self.prm['initialTurnpoints'] : self.prm['totalTurnpoints']], dtype=float64)
turnpointMean = mean(finalTurnpoints)
turnpointSd = std(finalTurnpoints, ddof=1)
self.resFile.write('\n\n')
self.resFile.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
finalTurnpoints = abs(array(self.prm['turnpointVal'][self.prm['initialTurnpoints'] : self.prm['totalTurnpoints']], dtype=float64))
turnpointMean = geoMean(finalTurnpoints)
turnpointSd = geoSd(finalTurnpoints)
self.resFile.write('\n\n')
self.resFile.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(turnpointMean) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.3f}'.format(turnpointSd) + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryLine('Transformed Up-Down', resLineToWrite)
elif method == 'weightedUpDown':
self.writeResultsSummaryLine('Weighted Up-Down', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryFullLine('Transformed Up-Down', resLineToWriteSummFull)
elif method == 'weightedUpDown':
self.writeResultsSummaryFullLine('Weighted Up-Down', resLineToWriteSummFull)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseAdaptiveHybrid(self, buttonClicked, method):
# procedure inspired by Hopkins, K., & Moore, B. C. J. (2010). Development of a fast method for measuring sensitivity to temporal fine structure information at low frequencies. International Journal of Audiology, 49(12), 940–6. http://doi.org/10.3109/14992027.2010.512613
# see also:
# King, A., Hopkins, K., & Plack, C. J. (2014). The effects of age and hearing loss on interaural phase difference discrimination. The Journal of the Acoustical Society of America, 135(1), 342–51. http://doi.org/10.1121/1.4838995
# if the adaptive track reaches self.prm['adaptiveMaxLimit'], switch to a constant procedure
        # that measures percent correct at self.prm['adaptiveMaxLimit'] for self.prm['nTrialsRequiredAtMaxLimit'] trials
        # note that the value of parent.prm['adaptiveParam'] needs to be limited in the experiment file!
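        # In outline: a standard transformed/weighted up-down track is run; trials presented while the adaptive
        # parameter is at (or above) the maximum limit are also tallied, and once the switch criterion is met the
        # block continues as a constant-stimuli measurement, ending after self.prm['nTrialsRequiredAtMaxLimit']
        # trials at the limit rather than after self.prm['totalTurnpoints'] turnpoints.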
if self.prm['startOfBlock'] == True:
self.prm['blockHasEnded'] = False
self.prm['correctCount'] = 0
self.prm['incorrectCount'] = 0
self.prm['nTurnpoints'] = 0
self.prm['startOfBlock'] = False
self.prm['turnpointVal'] = []
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
self.prm['nCorrectAtMaxLimit'] = 0 ##
self.prm['nTotalAtMaxLimit'] = 0 ##
self.prm['percentCorrectAtMaxLimit'] = numpy.nan ##
self.prm['switchedToConstant'] = False ##
if method == 'transformedUpDown':
self.prm['percentCorrectTracked'] = (0.5**(1/self.prm['numberCorrectNeeded']))*100
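                # For an N-down 1-up transformed track the staircase converges on the point where p**N = 0.5,
                # i.e. p = 0.5**(1/N) (Levitt, 1971); e.g. N = 2 tracks ~70.7% correct, N = 3 tracks ~79.4%.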
if self.prm['corrTrackDir'] == self.tr("Down"):
self.prm['corrTrackSign'] = -1
self.prm['incorrTrackSign'] = 1
self.prm['incorrTrackDir'] = self.tr("Up")
else:
self.prm['corrTrackSign'] = 1
self.prm['incorrTrackSign'] = -1
self.prm['incorrTrackDir'] = self.tr("Down")
self.fullFileLines = []
self.fullFileSummLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
stepSize = {}
if method == 'transformedUpDown':
if self.prm['nTurnpoints'] < self.prm['initialTurnpoints']:
stepSize[self.tr("Down")] = self.prm['adaptiveStepSize1']
stepSize[self.tr("Up")] = self.prm['adaptiveStepSize1']
else:
stepSize[self.tr("Down")] = self.prm['adaptiveStepSize2']
stepSize[self.tr("Up")] = self.prm['adaptiveStepSize2']
elif method == 'weightedUpDown':
if self.prm['nTurnpoints'] < self.prm['initialTurnpoints']:
stepSize[self.prm['corrTrackDir']] = self.prm['adaptiveStepSize1']
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize1'] * (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize1'] ** (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
else:
stepSize[self.prm['corrTrackDir']] = self.prm['adaptiveStepSize2']
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize2'] * (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize2'] ** (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
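            # Weighted up-down (Kaernbach, 1991): the step taken in the 'incorrect' direction is the 'correct'
            # step scaled (arithmetic) or exponentiated (geometric) by p/(100-p), so the track converges on
            # 'percentCorrectTracked'; e.g. tracking 75% correct makes the incorrect-direction step three times
            # the correct-direction step.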
#--..--
# if self.prm['adaptiveParam'] >= self.prm['adaptiveMaxLimit']:
# self.prm['nTotalAtMaxLimit'] = self.prm['nTotalAtMaxLimit']+1
# if buttonClicked == self.correctButton:
# self.prm['nCorrectAtMaxLimit'] = self.prm['nCorrectAtMaxLimit']+1
# self.prm['percentCorrectAtMaxLimit'] = (self.prm['nCorrectAtMaxLimit']/self.prm['nTotalAtMaxLimit'])*100
# if self.prm['nTotalAtMaxLimit'] > self.prm['minSwitchTrials']:
# if self.prm['percentCorrectAtMaxLimit'] < self.prm['percentCorrectTracked']:
# self.prm['switchedToConstant'] = True
if self.prm['switchAfterInitialTurnpoints'] == self.tr("Yes"):
if self.prm['adaptiveParam'] >= self.prm['adaptiveMaxLimit'] and self.prm['nTurnpoints'] > self.prm['initialTurnpoints']:
self.prm['nTotalAtMaxLimit'] = self.prm['nTotalAtMaxLimit']+1
if buttonClicked == self.correctButton:
self.prm['nCorrectAtMaxLimit'] = self.prm['nCorrectAtMaxLimit']+1
self.prm['percentCorrectAtMaxLimit'] = (self.prm['nCorrectAtMaxLimit']/self.prm['nTotalAtMaxLimit'])*100
else:
if self.prm['adaptiveParam'] >= self.prm['adaptiveMaxLimit']:
self.prm['nTotalAtMaxLimit'] = self.prm['nTotalAtMaxLimit']+1
if buttonClicked == self.correctButton:
self.prm['nCorrectAtMaxLimit'] = self.prm['nCorrectAtMaxLimit']+1
self.prm['percentCorrectAtMaxLimit'] = (self.prm['nCorrectAtMaxLimit']/self.prm['nTotalAtMaxLimit'])*100
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('1; ')
self.fullFileLines.append('1; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('1' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
                    self.fullFileLog.write('; ')
                    self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
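            # The up-down rule is applied only while the procedure is still in its adaptive phase; after the
            # switch to the constant phase the adaptive parameter is no longer updated and only the counts at
            # the maximum limit (tallied above) matter.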
if self.prm['switchedToConstant'] == False:
self.prm['correctCount'] = self.prm['correctCount'] + 1
self.prm['incorrectCount'] = 0
if self.prm['correctCount'] == self.prm['numberCorrectNeeded']:
self.prm['correctCount'] = 0
if self.prm['trackDir'] == self.prm['incorrTrackDir']:
self.prm['turnpointVal'].append(self.prm['adaptiveParam'])
self.prm['nTurnpoints'] = self.prm['nTurnpoints'] +1
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (stepSize[self.prm['corrTrackDir']]*self.prm['corrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (stepSize[self.prm['corrTrackDir']]**self.prm['corrTrackSign'])
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('0; ')
self.fullFileLines.append('0; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('0' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
if self.prm['switchedToConstant'] == False:
self.prm['incorrectCount'] = self.prm['incorrectCount'] + 1
self.prm['correctCount'] = 0
if self.prm['incorrectCount'] == self.prm['numberIncorrectNeeded']:
self.prm['incorrectCount'] = 0
if self.prm['trackDir'] == self.prm['corrTrackDir']:#self.tr('Down'):
self.prm['turnpointVal'].append(self.prm['adaptiveParam'])
self.prm['nTurnpoints'] = self.prm['nTurnpoints'] +1
self.prm['trackDir'] = copy.copy(self.prm['incorrTrackDir'])#self.tr('Up')
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (stepSize[self.prm['incorrTrackDir']]*self.prm['incorrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (stepSize[self.prm['incorrTrackDir']]**self.prm['incorrTrackSign'])
if self.prm['switchAfterInitialTurnpoints'] == self.tr("Yes"):
if self.prm['adaptiveParam'] >= self.prm['adaptiveMaxLimit'] and self.prm['nTurnpoints'] >= self.prm['initialTurnpoints']:
self.prm['switchedToConstant'] = True
else:
if self.prm['adaptiveParam'] >= self.prm['adaptiveMaxLimit']:
self.prm['switchedToConstant'] = True
#print("Adaptive param. :" + str(self.prm['adaptiveParam']))
#print("PC tracked: " + str(self.prm['percentCorrectTracked']))
        print("N turnpoints: " + str(self.prm['nTurnpoints']))
print("Switched to constant: " + str(self.prm['switchedToConstant']))
print("N corr. at max limit: " + str(self.prm['nCorrectAtMaxLimit']))
print("N tot. at max limit: " + str(self.prm['nTotalAtMaxLimit']))
print("PC at max limit: " + str(self.prm['percentCorrectAtMaxLimit']))
self.fullFileLog.flush()
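        # Progress is measured by turnpoints during the adaptive phase and by trials collected at the
        # maximum limit after the switch to the constant phase.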
if self.prm['switchedToConstant'] == False:
pcDone = (self.prm['nTurnpoints'] / self.prm['totalTurnpoints']) * 100
else:
pcDone = (self.prm['nTotalAtMaxLimit'] / self.prm['nTrialsRequiredAtMaxLimit'])*100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.prm['switchedToConstant'] == False:
if self.prm['nTurnpoints'] == self.prm['totalTurnpoints']:
self.prm['blockHasEnded'] = True
self.writeResultsHeader('standard')
#process results
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
for i in range(len(self.prm['turnpointVal'])):
if i == self.prm['initialTurnpoints']:
self.resFile.write('| ')
self.resFile.write('%5.2f ' %self.prm['turnpointVal'][i])
self.resFileLog.write('%5.2f ' %self.prm['turnpointVal'][i])
if i == self.prm['totalTurnpoints']-1:
self.resFile.write('| ')
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
finalTurnpoints = array(self.prm['turnpointVal'][self.prm['initialTurnpoints'] : self.prm['totalTurnpoints']], dtype=float64)
turnpointMean = mean(finalTurnpoints)
turnpointSd = std(finalTurnpoints, ddof=1)
self.resFile.write('\n\n')
self.resFile.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
finalTurnpoints = abs(array(self.prm['turnpointVal'][self.prm['initialTurnpoints'] : self.prm['totalTurnpoints']], dtype=float64))
turnpointMean = geoMean(finalTurnpoints)
turnpointSd = geoSd(finalTurnpoints)
self.resFile.write('\n\n')
self.resFile.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
else:
if self.prm['nTotalAtMaxLimit'] >= self.prm['nTrialsRequiredAtMaxLimit']:
self.prm['blockHasEnded'] = True
self.writeResultsHeader('standard')
#process results
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.resFile.write('No. Correct at Max Level = %d \n' %(self.prm['nCorrectAtMaxLimit']))
self.resFile.write('No. Total at Max Level = %d \n' %(self.prm['nTotalAtMaxLimit']))
self.resFile.write('Percent Correct at Max Level = %5.2f \n' %(self.prm['percentCorrectAtMaxLimit']))
self.resFile.write('\n\n')
self.resFileLog.write('No. Correct at Max Level = %d \n' %(self.prm['nCorrectAtMaxLimit']))
self.resFileLog.write('No. Total at Max Level = %d \n' %(self.prm['nTotalAtMaxLimit']))
self.resFileLog.write('Percent Correct at Max Level = %5.2f \n' %(self.prm['percentCorrectAtMaxLimit']))
self.resFileLog.write('\n\n')
turnpointMean = numpy.nan
turnpointSd = numpy.nan
if self.prm['blockHasEnded'] == True:
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(turnpointMean) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.3f}'.format(turnpointSd) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.prm['nCorrectAtMaxLimit']) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.prm['nTotalAtMaxLimit']) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.2f}'.format(self.prm['percentCorrectAtMaxLimit']) + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryLine('Transformed Up-Down Hybrid', resLineToWrite)
elif method == 'weightedUpDown':
self.writeResultsSummaryLine('Weighted Up-Down Hybrid', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryFullLine('Transformed Up-Down Hybrid', resLineToWriteSummFull)
elif method == 'weightedUpDown':
self.writeResultsSummaryFullLine('Weighted Up-Down Hybrid', resLineToWriteSummFull)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseAdaptiveLimited(self, buttonClicked, method):
        #I used this procedure a long time ago for an experiment in which participants were stuck at the maximum
        #adaptive difference and the block took too long to complete. To speed things up, this procedure registers
        #a turnpoint not only when the track reverses direction, but also when
        #self.prm['adaptiveParam'] == self.prm['adaptiveMaxLimit'].
        #In retrospect this was not the most elegant solution, and I do not recommend using this procedure
        #in general. It is kept here mainly for historical purposes.
if self.prm['startOfBlock'] == True:
self.prm['correctCount'] = 0
self.prm['incorrectCount'] = 0
self.prm['nTurnpoints'] = 0
self.prm['startOfBlock'] = False
self.prm['turnpointVal'] = []
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
if self.prm['corrTrackDir'] == self.tr("Down"):
self.prm['corrTrackSign'] = -1
self.prm['incorrTrackSign'] = 1
self.prm['incorrTrackDir'] = self.tr("Up")
else:
self.prm['corrTrackSign'] = 1
self.prm['incorrTrackSign'] = -1
self.prm['incorrTrackDir'] = self.tr("Down")
self.fullFileLines = []
self.fullFileSummLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
stepSize = {}
if method == 'transformedUpDown':
if self.prm['nTurnpoints'] < self.prm['initialTurnpoints']:
stepSize[self.tr("Down")] = self.prm['adaptiveStepSize1']
stepSize[self.tr("Up")] = self.prm['adaptiveStepSize1']
else:
stepSize[self.tr("Down")] = self.prm['adaptiveStepSize2']
stepSize[self.tr("Up")] = self.prm['adaptiveStepSize2']
elif method == 'weightedUpDown':
if self.prm['nTurnpoints'] < self.prm['initialTurnpoints']:
stepSize[self.prm['corrTrackDir']] = self.prm['adaptiveStepSize1']
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize1'] * (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize1'] ** (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
else:
stepSize[self.prm['corrTrackDir']] = self.prm['adaptiveStepSize2']
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize2'] * (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir']] = self.prm['adaptiveStepSize2'] ** (self.prm['percentCorrectTracked'] / (100-self.prm['percentCorrectTracked']))
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('1; ')
self.fullFileLines.append('1; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('1' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
                    self.fullFileLog.write('; ')
                    self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['correctCount'] = self.prm['correctCount'] + 1
self.prm['incorrectCount'] = 0
if self.prm['correctCount'] == self.prm['numberCorrectNeeded']:
self.prm['correctCount'] = 0
if self.prm['trackDir'] == self.prm['incorrTrackDir']:
self.prm['turnpointVal'].append(self.prm['adaptiveParam'])
self.prm['nTurnpoints'] = self.prm['nTurnpoints'] +1
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (stepSize[self.prm['corrTrackDir']]*self.prm['corrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (stepSize[self.prm['corrTrackDir']]**self.prm['corrTrackSign'])
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('0; ')
self.fullFileLines.append('0; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('0' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['incorrectCount'] = self.prm['incorrectCount'] + 1
self.prm['correctCount'] = 0
if self.prm['incorrectCount'] == self.prm['numberIncorrectNeeded']:
self.prm['incorrectCount'] = 0
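                # Unlike the standard up-down procedure, a turnpoint is also registered when the track is sitting
                # at self.prm['adaptiveMaxLimit'], even without a direction reversal (see the comment at the top
                # of this method).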
if self.prm['trackDir'] == self.prm['corrTrackDir'] or self.prm['adaptiveParam'] == self.prm['adaptiveMaxLimit']:
self.prm['turnpointVal'].append(self.prm['adaptiveParam'])
self.prm['nTurnpoints'] = self.prm['nTurnpoints'] +1
self.prm['trackDir'] = copy.copy(self.prm['incorrTrackDir'])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (stepSize[self.prm['incorrTrackDir']]*self.prm['incorrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (stepSize[self.prm['incorrTrackDir']]**self.prm['incorrTrackSign'])
self.fullFileLog.flush()
pcDone = (self.prm['nTurnpoints'] / self.prm['totalTurnpoints']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.prm['nTurnpoints'] == self.prm['totalTurnpoints']:
self.writeResultsHeader('standard')
#process results
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
for i in range(len(self.prm['turnpointVal'])):
if i == self.prm['initialTurnpoints']:
self.resFile.write('| ')
self.resFile.write('%5.2f ' %self.prm['turnpointVal'][i])
self.resFileLog.write('%5.2f ' %self.prm['turnpointVal'][i])
if i == self.prm['totalTurnpoints']-1:
self.resFile.write('| ')
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
finalTurnpoints = array(self.prm['turnpointVal'][self.prm['initialTurnpoints'] : self.prm['totalTurnpoints']], dtype=float64)
turnpointMean = mean(finalTurnpoints)
turnpointSd = std(finalTurnpoints, ddof=1)
self.resFile.write('\n\n')
self.resFile.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
finalTurnpoints = abs(array(self.prm['turnpointVal'][self.prm['initialTurnpoints'] : self.prm['totalTurnpoints']], dtype=float64))
turnpointMean = geoMean(finalTurnpoints)
turnpointSd = geoSd(finalTurnpoints)
self.resFile.write('\n\n')
self.resFile.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean,turnpointSd))
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(turnpointMean) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.3f}'.format(turnpointSd) + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryLine('Transformed Up-Down Limited', resLineToWrite)
elif method == 'weightedUpDown':
self.writeResultsSummaryLine('Weighted Up-Down Limited', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryFullLine('Transformed Up-Down Limited', resLineToWriteSummFull)
elif method == 'weightedUpDown':
self.writeResultsSummaryFullLine('Weighted Up-Down Limited', resLineToWriteSummFull)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseAdaptiveInterleaved(self, buttonClicked, method):
if self.prm['startOfBlock'] == True:
self.prm['correctCount'] = [0 for number in range(self.prm['nDifferences'])]
self.prm['incorrectCount'] = [0 for number in range(self.prm['nDifferences'])]
self.prm['nTurnpoints'] = [0 for number in range(self.prm['nDifferences'])]
self.prm['startOfBlock'] = False
self.prm['turnpointVal'] = [[] for number in range(self.prm['nDifferences'])]
self.fullFileLines = []
self.prm['buttonCounter'] = [[0 for a in range(self.prm['nAlternatives'])] for i in range(self.prm['nDifferences'])]
self.prm['trackDir'] = []
self.prm['incorrTrackDir'] = []
self.prm['corrTrackSign'] = []
self.prm['incorrTrackSign'] = []
for i in range(self.prm['nDifferences']):
self.prm['trackDir'].append(copy.copy(self.prm['corrTrackDir'][i]))
if self.prm['corrTrackDir'][i] == self.tr("Down"):
self.prm['corrTrackSign'].append(-1)
self.prm['incorrTrackSign'].append(1)
self.prm['incorrTrackDir'].append(self.tr("Up"))
else:
self.prm['corrTrackSign'].append(1)
self.prm['incorrTrackSign'].append(-1)
self.prm['incorrTrackDir'].append(self.tr("Down"))
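        # Each difference runs its own interleaved track: direction, step signs, response counters and
        # turnpoints are kept in parallel lists indexed by the track number selected for the current trial.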
if buttonClicked == self.correctButton:
print("Correct Button Clicked")
else:
print("Incorrect Button Clicked")
trackNumber = self.prm['currentDifference']
self.prm['buttonCounter'][trackNumber][buttonClicked-1] = self.prm['buttonCounter'][trackNumber][buttonClicked-1] + 1
stepSize = {}
if method == 'weightedUpDown':
if self.prm['nTurnpoints'][trackNumber] < self.prm['initialTurnpoints'][trackNumber]:
stepSize[self.prm['corrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize1'][trackNumber]
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize1'][trackNumber] * (self.prm['percentCorrectTracked'][trackNumber] / (100-self.prm['percentCorrectTracked'][trackNumber]))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize1'][trackNumber] ** (self.prm['percentCorrectTracked'][trackNumber] / (100-self.prm['percentCorrectTracked'][trackNumber]))
else:
stepSize[self.prm['corrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize2'][trackNumber]
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
stepSize[self.prm['incorrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize2'][trackNumber] * (self.prm['percentCorrectTracked'][trackNumber] / (100-self.prm['percentCorrectTracked'][trackNumber]))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
stepSize[self.prm['incorrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize2'][trackNumber] ** (self.prm['percentCorrectTracked'][trackNumber] / (100-self.prm['percentCorrectTracked'][trackNumber]))
elif method == 'transformedUpDown':
if self.prm['nTurnpoints'][trackNumber] < self.prm['initialTurnpoints'][trackNumber]:
stepSize[self.prm['corrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize1'][trackNumber]
stepSize[self.prm['incorrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize1'][trackNumber]
else:
stepSize[self.prm['corrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize2'][trackNumber]
stepSize[self.prm['incorrTrackDir'][trackNumber]] = self.prm['adaptiveStepSize2'][trackNumber]
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
self.fullFileLog.write(str(self.prm['adaptiveParam'][trackNumber]) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam'][trackNumber]) + '; ')
self.fullFileLog.write('TRACK %d; 1; ' %(trackNumber+1))
self.fullFileLines.append('TRACK %d; 1; ' %(trackNumber+1))
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['correctCount'][trackNumber] = self.prm['correctCount'][trackNumber] + 1
self.prm['incorrectCount'][trackNumber] = 0
if self.prm['correctCount'][trackNumber] == self.prm['numberCorrectNeeded'][trackNumber]:
self.prm['correctCount'][trackNumber] = 0
if self.prm['trackDir'][trackNumber] == self.prm['incorrTrackDir'][trackNumber]:
self.prm['turnpointVal'][trackNumber].append(self.prm['adaptiveParam'][trackNumber])
self.prm['nTurnpoints'][trackNumber] = self.prm['nTurnpoints'][trackNumber] +1
self.prm['trackDir'][trackNumber] = copy.copy(self.prm['corrTrackDir'][trackNumber])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'][trackNumber] = self.prm['adaptiveParam'][trackNumber] + (stepSize[self.prm['corrTrackDir'][trackNumber]]*self.prm['corrTrackSign'][trackNumber])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'][trackNumber] = self.prm['adaptiveParam'][trackNumber] * (stepSize[self.prm['corrTrackDir'][trackNumber]]**self.prm['corrTrackSign'][trackNumber])
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
self.fullFileLog.write(str(self.prm['adaptiveParam'][trackNumber]) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam'][trackNumber]) + '; ')
self.fullFileLog.write('TRACK %d; 0; ' %(trackNumber+1))
self.fullFileLines.append('TRACK %d; 0; ' %(trackNumber+1))
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['incorrectCount'][trackNumber] = self.prm['incorrectCount'][trackNumber] + 1
self.prm['correctCount'][trackNumber] = 0
if self.prm['incorrectCount'][trackNumber] == self.prm['numberIncorrectNeeded'][trackNumber]:
self.prm['incorrectCount'][trackNumber] = 0
if self.prm['trackDir'][trackNumber] == self.prm['corrTrackDir'][trackNumber]:
self.prm['turnpointVal'][trackNumber].append(self.prm['adaptiveParam'][trackNumber])
self.prm['nTurnpoints'][trackNumber] = self.prm['nTurnpoints'][trackNumber] +1
self.prm['trackDir'][trackNumber] = copy.copy(self.prm['incorrTrackDir'][trackNumber])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'][trackNumber] = self.prm['adaptiveParam'][trackNumber] + (stepSize[self.prm['incorrTrackDir'][trackNumber]]*self.prm['incorrTrackSign'][trackNumber])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'][trackNumber] = self.prm['adaptiveParam'][trackNumber] * (stepSize[self.prm['incorrTrackDir'][trackNumber]]**self.prm['incorrTrackSign'][trackNumber])
self.fullFileLog.flush()
currNTurnpoints = 0
currTotTurnpoints = 0
for i in range(self.prm['nDifferences']):
currNTurnpoints = currNTurnpoints + min(self.prm['nTurnpoints'][i], self.prm['totalTurnpoints'][i])
currTotTurnpoints = currTotTurnpoints + self.prm['totalTurnpoints'][i]
pcDone = (currNTurnpoints / currTotTurnpoints) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
finished = 0
for i in range(self.prm['nDifferences']):
if self.prm['nTurnpoints'][i] >= self.prm['totalTurnpoints'][i]:
finished = finished + 1
if finished == self.prm['nDifferences']:
self.writeResultsHeader('standard')
#process results
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
turnpointMeanList = []
turnpointSdList = []
for j in range(self.prm['nDifferences']):
self.resFile.write('TRACK %d:\n' %(j+1))
self.resFileLog.write('TRACK %d:\n' %(j+1))
if self.prm['turnpointsToAverage'] == self.tr('All final stepsize (even)'):
tnpStart = self.prm['initialTurnpoints'][j]
tnpEnd = len(self.prm['turnpointVal'][j])
if (tnpEnd-tnpStart)%2 > 0: #odd number of turnpoints
tnpStart = self.prm['initialTurnpoints'][j] + 1
elif self.prm['turnpointsToAverage'] == self.tr('First N final stepsize'):
tnpStart = self.prm['initialTurnpoints'][j]
tnpEnd = self.prm['totalTurnpoints'][j]
elif self.prm['turnpointsToAverage'] == self.tr('Last N final stepsize'):
tnpStart = len(self.prm['turnpointVal'][j]) - (self.prm['totalTurnpoints'][j] - self.prm['initialTurnpoints'][j])
tnpEnd = len(self.prm['turnpointVal'][j])
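                # 'All final stepsize (even)' averages all turnpoints collected at the final step size, dropping
                # the earliest one if their number is odd; 'First N final stepsize' and 'Last N final stepsize'
                # average, respectively, the first and the last (totalTurnpoints - initialTurnpoints) of them.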
for i in range(len(self.prm['turnpointVal'][j])):
if i == (tnpStart):
self.resFile.write('| ')
self.resFileLog.write('| ')
self.resFile.write('%5.2f ' %self.prm['turnpointVal'][j][i])
self.resFileLog.write('%5.2f ' %self.prm['turnpointVal'][j][i])
if i == (tnpEnd-1):
self.resFile.write('| ')
self.resFileLog.write('| ')
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
turnpointMean = mean(array(self.prm['turnpointVal'][j][tnpStart : tnpEnd], dtype=float64))
turnpointSd = std(array(self.prm['turnpointVal'][j][tnpStart : tnpEnd], dtype=float64), ddof=1)
self.resFile.write('\n\n')
self.resFile.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean, turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean, turnpointSd))
turnpointMeanList.append(turnpointMean)
turnpointSdList.append(turnpointSd)
elif self.prm['adaptiveType'] == self.tr("Geometric"):
turnpointMean = geoMean(abs(array(self.prm['turnpointVal'][j][tnpStart : tnpEnd], dtype=float64)))
turnpointSd = geoSd(abs(array(self.prm['turnpointVal'][j][tnpStart : tnpEnd], dtype=float64)))
self.resFile.write('\n\n')
self.resFile.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean, turnpointSd))
self.resFileLog.write('\n\n')
self.resFileLog.write('geometric turnpointMean = %5.2f, s.d. = %5.2f \n' %(turnpointMean, turnpointSd))
turnpointMeanList.append(turnpointMean)
turnpointSdList.append(turnpointSd)
for a in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(a+1, self.prm['buttonCounter'][j][a]))
self.resFileLog.write("B{0} = {1}".format(a+1, self.prm['buttonCounter'][j][a]))
if a != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
if j != self.prm['nDifferences']-1:
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
self.resFile.write('\n.\n')
self.resFile.flush()
self.resFileLog.write('\n.\n')
self.resFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = ''
for j in range(self.prm['nDifferences']):
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(turnpointMeanList[j]) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.3f}'.format(turnpointSdList[j]) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
if method == 'transformedUpDown':
self.writeResultsSummaryLine('Transformed Up-Down Interleaved', resLineToWrite)
elif method == 'weightedUpDown':
self.writeResultsSummaryLine('Weighted Up-Down Interleaved', resLineToWrite)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseConstantMIntervalsNAlternatives(self, buttonClicked):
if self.prm['startOfBlock'] == True:
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.trialCount = 0
self.correctCount = 0
self.trialCountAll = 0
self.trialCountAll = self.trialCountAll + 1
if self.trialCountAll > self.prm['nPracticeTrials']:
self.trialCount = self.trialCount + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
if self.trialCountAll > self.prm['nPracticeTrials']:
self.correctCount = self.correctCount + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write(resp + '; ')
self.fullFileLines.append(resp + '; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
pcDone = self.trialCountAll / (self.prm['nTrials']+self.prm['nPracticeTrials']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCountAll >= (self.prm['nTrials'] + self.prm['nPracticeTrials']): # Block is completed
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
propCorr = self.correctCount/self.trialCount
dp = dprime_mAFC(propCorr, self.prm['nAlternatives'])
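            # Practice trials were excluded from the counts above; dprime_mAFC converts the resulting proportion
            # correct into d' for an n-alternative forced-choice task.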
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('No. Correct = %d\n' %(self.correctCount))
ftyp.write('No. Total = %d\n' %(self.trialCount))
                ftyp.write('Percent Correct = %5.2f \n' %(propCorr*100))
ftyp.write('d-prime = %5.3f \n' %(dp))
ftyp.write('\n')
                ftyp.flush()
self.fullFile.flush()
self.fullFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
            #summary line fields: dprime percentCorrect nCorrect nTotal condition listener session experimentLabel date time duration blockPosition experiment paradigm nIntervals nAlternatives
resLineToWrite = ''
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.2f}'.format(self.correctCount/self.trialCount*100) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.correctCount) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.trialCount) + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['nIntervals']) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['nAlternatives']) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Constant m-Intervals n-Alternatives', resLineToWrite)
self.atBlockEnd()
else: #block is not finished, move on to next trial
self.doTrial()
def sortResponseMultipleConstantsMIntervalsNAlternatives(self, buttonClicked):
if self.prm['startOfBlock'] == True:
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.trialCount = {}
self.correctCount = {}
self.trialCountCnds = {}
self.correctCountCnds = {}
self.trialCountAllCnds = {}
for i in range(len(self.prm['conditions'])):
self.trialCountCnds[self.prm['conditions'][i]] = 0
self.correctCountCnds[self.prm['conditions'][i]] = 0
self.trialCountAllCnds[self.prm['conditions'][i]] = 0
self.trialCount[i] = 0
self.correctCount[i] = 0
self.trialCountAll = 0
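        # Two sets of counters are kept: trialCountCnds/correctCountCnds, indexed by condition label, feed the
        # per-condition results, while trialCount, indexed by difference number, is used at the end of each
        # trial to pick a condition that still needs trials (correctCount is kept in parallel).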
self.trialCountAll = self.trialCountAll + 1
self.trialCountAllCnds[self.currentCondition] = self.trialCountAllCnds[self.currentCondition] + 1
if self.trialCountAllCnds[self.currentCondition] > self.prm['nPracticeTrials']:
self.trialCountCnds[self.currentCondition] = self.trialCountCnds[self.currentCondition] + 1
self.trialCount[self.prm['currentDifference']] = self.trialCount[self.prm['currentDifference']] + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
if self.trialCountAllCnds[self.currentCondition] > self.prm['nPracticeTrials']:#if self.trialCountAll > self.prm['nPracticeTrials']:
self.correctCountCnds[self.currentCondition] = self.correctCountCnds[self.currentCondition] + 1
self.correctCount[self.prm['currentDifference']] = self.correctCount[self.prm['currentDifference']] + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write(self.currentCondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentCondition + '; ' + resp + '; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
pcDone = self.trialCountAll / ((self.prm['nTrials'] + self.prm['nPracticeTrials'])*len(self.prm['conditions']))*100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCountAll >= (self.prm['nTrials'] + self.prm['nPracticeTrials'])*len(self.prm['conditions']): # Block is completed
totalCorrectCount = 0
totalTrialCount = 0
for i in range(len(self.prm['conditions'])):
totalTrialCount = totalTrialCount + self.trialCount[i]
totalCorrectCount = totalCorrectCount + self.correctCountCnds[self.prm['conditions'][i]]
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
dprimeList = []
for i in range(len(self.prm['conditions'])):
thisPropCorr = (self.correctCountCnds[self.prm['conditions'][i]])/self.trialCountCnds[self.prm['conditions'][i]]
thisdprime = dprime_mAFC(thisPropCorr, self.prm['nAlternatives'])
dprimeList.append(thisdprime)
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('CONDITION, ' + str(i+1) + '; ' + self.prm['conditions'][i] + '\n')
ftyp.write('No. Correct = %d\n' %(self.correctCountCnds[self.prm['conditions'][i]]))
ftyp.write('No. Total = %d \n' %((self.trialCountCnds[self.prm['conditions'][i]])))
ftyp.write('Percent Correct = %5.2f \n' %(thisPropCorr*100))
ftyp.write('d-prime = %5.3f \n' %(thisdprime))
ftyp.write('\n')
propCorrAll = totalCorrectCount/totalTrialCount
dprimeAll = dprime_mAFC(propCorrAll, self.prm['nAlternatives'])
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('CONDITION, ALL \n')
ftyp.write('No. Correct = %d\n' %(totalCorrectCount))
ftyp.write('No. Total = %d\n' %(totalTrialCount))
ftyp.write('Percent Correct = %5.2f \n' %(propCorrAll*100))
ftyp.write('d-prime = %5.3f \n' %(dprimeAll))
ftyp.write('\n.\n\n')
ftyp.flush()
self.fullFile.flush()
self.fullFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = ''
for i in range(len(self.prm['conditions'])):
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dprimeList[i]) + self.prm["pref"]["general"]["csvSeparator"] + \
'{0:5.2f}'.format((self.correctCountCnds[self.prm['conditions'][i]]*100)/self.trialCountCnds[self.prm['conditions'][i]]) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.correctCountCnds[self.prm['conditions'][i]]) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.trialCountCnds[self.prm['conditions'][i]]) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dprimeAll) + self.prm["pref"]["general"]["csvSeparator"] + \
str(totalCorrectCount/totalTrialCount*100) + self.prm["pref"]["general"]["csvSeparator"] + \
str(totalCorrectCount) + self.prm["pref"]["general"]["csvSeparator"] + \
str(totalTrialCount) + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['nIntervals']) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['nAlternatives']) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Multiple Constants m-Intervals n-Alternatives', resLineToWrite)
self.atBlockEnd()
else: #block is not finished, move on to next trial
remainingDifferences = []
for key in self.trialCount.keys():
if self.trialCount[key] < self.prm['nTrials']:
remainingDifferences.append(key)
self.prm['currentDifference'] = random.choice(remainingDifferences)
self.doTrial()
def sortResponseConstant1Interval2Alternatives(self, buttonClicked):
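        #Score the listener's response in a constant-stimuli 1-interval 2-alternatives
        #(yes/no type) block: update overall and per-condition trial and correct counts
        #(practice trials excluded), give feedback, log the trial, and at the end of the
        #block compute percent correct and d' and write the result summaries.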
if self.prm['startOfBlock'] == True: #Initialize counts and data structures
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.correctCount = 0 #count of correct trials
self.trialCount = 0 #count of total trials
self.correctCountCnds = {} #count of correct trials by condition
self.trialCountCnds = {} #count of total trials by condition
for i in range(len(self.prm['conditions'])):
self.trialCountCnds[self.prm['conditions'][i]] = 0
self.correctCountCnds[self.prm['conditions'][i]] = 0
self.trialCountAll = 0
#Add one to trial counts
self.trialCountAll = self.trialCountAll + 1
if self.trialCountAll > self.prm['nPracticeTrials']:
self.trialCountCnds[self.currentCondition] = self.trialCountCnds[self.currentCondition] + 1
self.trialCount = self.trialCount + 1
#if correct response, add one to correct resp. count
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
if self.trialCountAll > self.prm["nPracticeTrials"]:
self.correctCountCnds[self.currentCondition] = self.correctCountCnds[self.currentCondition] + 1
self.correctCount = self.correctCount + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write(self.currentCondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentCondition + '; ' + resp + '; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
#move percent done bar
pcDone = self.trialCountAll/(self.prm['nTrials'] + self.prm['nPracticeTrials'])*100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
#Completed all trials, compute stats
if self.trialCountAll >= self.prm['nPracticeTrials'] + self.prm['nTrials']: # Block is completed
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
self.fullFile.flush()
self.fullFileLog.flush()
A_correct = self.correctCountCnds[self.prm['conditions'][0]]
A_total = self.trialCountCnds[self.prm['conditions'][0]]
B_correct = self.correctCountCnds[self.prm['conditions'][1]]
B_total = self.trialCountCnds[self.prm['conditions'][1]]
try:
dp = dprime_yes_no_from_counts(A_correct, A_total, B_correct, B_total, self.prm["pref"]['general']['dprimeCorrection'])
except:
dp = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('No. Correct = %d\n' %(self.correctCount))
ftyp.write('No. Total = %d\n' %(self.trialCount))
ftyp.write('Percent Correct = %5.2f \n' %(self.correctCount/self.trialCount*100))
ftyp.write("d-prime = %5.3f \n\n" %(dp))
for i in range(len(self.prm['conditions'])):
try:
thisPercentCorrect = (self.correctCountCnds[self.prm['conditions'][i]]*100)/self.trialCountCnds[self.prm['conditions'][i]]
except:
thisPercentCorrect = nan
ftyp.write('No. Correct Condition %s = %d\n' %(self.prm['conditions'][i], self.correctCountCnds[self.prm['conditions'][i]]))
ftyp.write('No. Total Condition %s = %d \n' %(self.prm['conditions'][i], self.trialCountCnds[self.prm['conditions'][i]]))
ftyp.write('Percent Correct Condition %s = %5.2f \n' %(self.prm['conditions'][i], thisPercentCorrect))
ftyp.write('\n\n')
ftyp.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
#'dprime condition listener session experimentLabel nCorrectA nTotalA nCorrectB nTotalB nCorrect nTotal date time duration block experiment'
resLineToWrite = '{0:5.3f}'.format(dp) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.trialCount) + self.prm["pref"]["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
resLineToWrite = resLineToWrite + str(self.correctCountCnds[self.prm['conditions'][i]]) + self.prm["pref"]["general"]["csvSeparator"] + \
str(self.trialCountCnds[self.prm['conditions'][i]]) + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Constant 1-Interval 2-Alternatives', resLineToWrite)
self.atBlockEnd()
else: #block is not finished, move on to next trial
self.doTrial()
def sortResponseMultipleConstants1Interval2Alternatives(self, buttonClicked):
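        #Score the listener's response in a multiple-constants 1-interval 2-alternatives
        #block: counts are kept separately for each condition and trial type (practice
        #trials excluded); at the end of the block d' is computed for each condition and
        #for all conditions pooled, and the result summaries are written.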
if self.prm['startOfBlock'] == True:
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.trialCount = {}
self.correctCount = {}
self.trialCountCnds = {}
self.correctCountCnds = {}
self.trialCountAllCnds = {}
for i in range(len(self.prm['conditions'])):
self.trialCount[i] = 0
self.correctCount[i] = 0
self.trialCountCnds[self.prm['conditions'][i]] = {}
self.correctCountCnds[self.prm['conditions'][i]] = {}
self.trialCountAllCnds[self.prm['conditions'][i]] = 0
for j in range(len(self.prm['trialTypes'])):
self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]] = 0
self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]] = 0
self.trialCountAll = 0
self.trialCountAll = self.trialCountAll + 1
self.trialCountAllCnds[self.currentCondition] = self.trialCountAllCnds[self.currentCondition] + 1
if self.trialCountAllCnds[self.currentCondition] > self.prm['nPracticeTrials']:
self.trialCountCnds[self.currentCondition][self.currentSubcondition] = self.trialCountCnds[self.currentCondition][self.currentSubcondition] + 1
self.trialCount[self.prm['currentDifference']] = self.trialCount[self.prm['currentDifference']] + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
if self.trialCountAllCnds[self.currentCondition] > self.prm['nPracticeTrials']:
self.correctCountCnds[self.currentCondition][self.currentSubcondition] = self.correctCountCnds[self.currentCondition][self.currentSubcondition] + 1
self.correctCount[self.prm['currentDifference']] = self.correctCount[self.prm['currentDifference']] + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write(self.currentCondition + '; ' + self.currentSubcondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentCondition + '; ' + self.currentSubcondition + '; ' + resp + '; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
pcDone = (self.trialCountAll / ((self.prm['nTrials']+self.prm['nPracticeTrials']) * len(self.prm['conditions'])))*100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCountAll >= (self.prm['nTrials'] + self.prm['nPracticeTrials'])*len(self.prm['conditions']): # Block is completed
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
totalCorrectCount = 0
subconditionTrialCount = [0 for number in range(len(self.prm['trialTypes']))]
subconditionCorrectCount = [0 for number in range(len(self.prm['trialTypes']))]
A_correct = []
A_total = []
B_correct = []
B_total = []
dp = []
totalTrialCount = 0
for i in range(len(self.prm['conditions'])):
totalTrialCount = totalTrialCount + self.trialCount[i]
thisCondTotalCorrectCount = 0
for j in range(len(self.prm['trialTypes'])):
thisCondTotalCorrectCount = thisCondTotalCorrectCount + self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]
subconditionCorrectCount[j] = subconditionCorrectCount[j] + self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]
subconditionTrialCount[j] = subconditionTrialCount[j] + self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]
totalCorrectCount = totalCorrectCount + thisCondTotalCorrectCount
#compute d-prime for each condition
A_correct.append(self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][0]])
A_total.append(self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][0]])
B_correct.append(self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][1]])
B_total.append(self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][1]])
try:
this_dp = dprime_yes_no_from_counts(nCA=A_correct[i], nTA=A_total[i], nCB=B_correct[i], nTB=B_total[i], corr=self.prm['pref']['general']['dprimeCorrection'])
except:
this_dp = nan
dp.append(this_dp)
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('CONDITION: %d; %s \n' %(i+1, self.prm['conditions'][i]))
ftyp.write('No. Correct = %d\n' %(thisCondTotalCorrectCount))
ftyp.write('No. Total = %d\n' %(self.prm['nTrials']))
ftyp.write('Percent Correct = %5.2f \n' %(thisCondTotalCorrectCount/self.trialCount[i]*100))
ftyp.write("d-prime = %5.3f \n\n" %(this_dp))
for j in range(len(self.prm['trialTypes'])):
try:
thisPercentCorrect = self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]/self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]*100
except:
thisPercentCorrect = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('No. Correct Subcondition %s = %d\n' %(self.prm['trialTypes'][j], self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]))
ftyp.write('No. Total Subcondition %s = %d \n' %(self.prm['trialTypes'][j], self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]))
ftyp.write('Percent Correct Subcondition %s = %5.2f \n' %(self.prm['trialTypes'][j], thisPercentCorrect))
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
A_correct_ALL = subconditionCorrectCount[0]
A_total_ALL = subconditionTrialCount[0]
B_correct_ALL = subconditionCorrectCount[1]
B_total_ALL = subconditionTrialCount[1]
try:
dp_ALL = dprime_yes_no_from_counts(nCA=A_correct_ALL, nTA=A_total_ALL, nCB=B_correct_ALL, nTB=B_total_ALL, corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_ALL = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('CONDITION: ALL \n')
ftyp.write('No. Correct = %d\n' %(totalCorrectCount))
                ftyp.write('No. Total = %d\n' %(totalTrialCount))
ftyp.write('Percent Correct = %5.2f \n' %(totalCorrectCount/totalTrialCount*100))
ftyp.write("d-prime = %5.3f \n\n" %(dp_ALL))
for j in range(len(self.prm['trialTypes'])):
try:
thisPercentCorrect = subconditionCorrectCount[j]/subconditionTrialCount[j]*100
except:
thisPercentCorrect = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('No. Correct Subcondition %s = %d\n' %(self.prm['trialTypes'][j], subconditionCorrectCount[j]))
ftyp.write('No. Total Subcondition %s = %d \n' %(self.prm['trialTypes'][j], subconditionTrialCount[j]))
ftyp.write('Percent Correct Subcondition %s = %5.2f \n' %(self.prm['trialTypes'][j], thisPercentCorrect))
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('.\n\n')
self.resFile.flush()
self.resFileLog.write('.\n\n')
self.resFileLog.flush()
self.fullFile.flush()
self.fullFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
            #'dprime condition listener session experimentLabel nCorrectA nTotalA nCorrectB nTotalB nCorrect nTotal date time duration block experiment'
resLineToWrite = '{0:5.3f}'.format(dp_ALL) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(totalTrialCount) + self.prm['pref']["general"]["csvSeparator"]
for j in range(len(self.prm['trialTypes'])):
resLineToWrite = resLineToWrite + str(subconditionCorrectCount[j]) + self.prm['pref']["general"]["csvSeparator"] + \
str(subconditionTrialCount[j]) + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp[i]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][0]] + self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][1]]) + self.prm['pref']["general"]["csvSeparator"]
for j in range(len(self.prm['trialTypes'])):
resLineToWrite = resLineToWrite + str(self.correctCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.trialCountCnds[self.prm['conditions'][i]][self.prm['trialTypes'][j]]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Multiple Constants 1-Interval 2-Alternatives', resLineToWrite)
self.atBlockEnd()
else: #block is not finished, move on to next trial
remainingDifferences = []
for key in self.trialCount.keys():
if self.trialCount[key] < self.prm['nTrials']:
remainingDifferences.append(key)
self.prm['currentDifference'] = random.choice(remainingDifferences)
self.doTrial()
def sortResponseConstant1PairSameDifferent(self, buttonClicked):
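        #Score the listener's response in a constant-stimuli same/different task with one
        #stimulus pair per trial: update per-condition counts (practice trials excluded),
        #give feedback, log the trial, and at the end of the block compute percent correct
        #and d' and write the result summaries.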
if self.prm['startOfBlock'] == True:
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.trialCount = 0
self.trialCountCnds = {}
self.correctCountCnds = {}
for i in range(len(self.prm['conditions'])):
self.trialCountCnds[self.prm['conditions'][i]] = 0
self.correctCountCnds[self.prm['conditions'][i]] = 0
            self.trialCountAll = 0 #this also includes the practice trials
self.trialCountAll = self.trialCountAll + 1
if self.trialCountAll > self.prm['nPracticeTrials']:
self.trialCountCnds[self.currentCondition] = self.trialCountCnds[self.currentCondition] + 1
self.trialCount = self.trialCount + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
if self.trialCountAll > self.prm['nPracticeTrials']:
self.correctCountCnds[self.currentCondition] = self.correctCountCnds[self.currentCondition] + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write(self.currentCondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentCondition + '; ' + resp + '; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
cnt = 0
for i in range(len(self.prm['conditions'])):
cnt = cnt + self.trialCountCnds[self.prm['conditions'][i]]
pcDone = cnt / self.prm['nTrials'] * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCountAll >= self.prm['nTrials'] + self.prm['nPracticeTrials']: # Block is completed
totalCorrectCount = 0
for i in range(len(self.prm['conditions'])):
totalCorrectCount = totalCorrectCount + self.correctCountCnds[self.prm['conditions'][i]]
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
self.fullFile.flush()
self.fullFileLog.flush()
A_correct = self.correctCountCnds[self.prm['conditions'][0]]
A_total = self.trialCountCnds[self.prm['conditions'][0]]
B_correct = self.correctCountCnds[self.prm['conditions'][1]]
B_total = self.trialCountCnds[self.prm['conditions'][1]]
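            #The counts for the two conditions are passed to dprime_SD_from_counts, which
            #estimates d' under the decision model selected by 'meth': 'IO' for the
            #independent-observations model and 'diff' for the differencing model of
            #same/different discrimination.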
try:
dp_IO = dprime_SD_from_counts(nCA=A_correct, nTA=A_total, nCB=B_correct, nTB=B_total, meth='IO', corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_IO = nan
try:
dp_diff = dprime_SD_from_counts(nCA=A_correct, nTA=A_total, nCB=B_correct, nTB=B_total, meth='diff', corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_diff = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write('No. Correct = %d\n' %(totalCorrectCount))
ftyp.write('No. Total = %d\n' %(self.trialCount))
ftyp.write('Percent Correct = %5.2f \n' %(totalCorrectCount/self.trialCount*100))
ftyp.write("d-prime IO = %5.3f \n" %(dp_IO))
ftyp.write("d-prime diff = %5.3f \n\n" %(dp_diff))
for i in range(len(self.prm['conditions'])):
try:
thisPercentCorrect = (self.correctCountCnds[self.prm['conditions'][i]]*100)/self.trialCountCnds[self.prm['conditions'][i]]
except:
thisPercentCorrect = nan
ftyp.write('No. Correct Condition %s = %d\n' %(self.prm['conditions'][i], self.correctCountCnds[self.prm['conditions'][i]]))
ftyp.write('No. Total Condition %s = %d \n' %(self.prm['conditions'][i], self.trialCountCnds[self.prm['conditions'][i]]))
                    ftyp.write('Percent Correct Condition %s = %5.2f \n' %(self.prm['conditions'][i], thisPercentCorrect))
ftyp.write('\n\n')
ftyp.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
#'dprime condition listener session experimentLabel nCorrectA nTotalA nCorrectB nTotalB nCorrect nTotal date time duration block experiment'
resLineToWrite = '{0:5.3f}'.format(dp_IO) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp_diff) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.trialCount) + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
resLineToWrite = resLineToWrite + str(self.correctCountCnds[self.prm['conditions'][i]]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.trialCountCnds[self.prm['conditions'][i]]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Constant 1-Pair Same/Different', resLineToWrite)
self.atBlockEnd()
else: #block is not finished, move on to next trial
self.doTrial()
def sortResponseMultipleConstants1PairSameDifferent(self, buttonClicked):
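        #Score the listener's response in a same/different task run at multiple stimulus
        #differences: counts are kept per difference and per condition (practice trials
        #excluded), each difference is dropped from the rotation once its trials are done,
        #and at the end of the block d' ('IO' and 'diff' models) is computed per difference.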
if self.prm['startOfBlock'] == True:
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.fullFileSummLines = []
self.trialCount = {} #trial count by difference, excluding practice trials
self.trialCountCnds = {} #trial count by difference and condition, excluding practice trials
self.correctCountCnds = {}
            self.trialCountAll = {} #this also includes the practice trials
for j in range(self.prm['nDifferences']):
self.trialCount[j] = 0
self.trialCountCnds[j] = {}
self.correctCountCnds[j] = {}
for i in range(len(self.prm['conditions'])):
self.trialCountCnds[j][self.prm['conditions'][i]] = 0
self.correctCountCnds[j][self.prm['conditions'][i]] = 0
self.trialCountAll[j] = 0
self.currentDifferenceName = self.prm['differenceNames'][self.prm['currentDifference']]
self.trialCountAll[self.prm['currentDifference']] = self.trialCountAll[self.prm['currentDifference']] + 1
if self.trialCountAll[self.prm['currentDifference']] > self.prm['nPracticeTrials']:
self.trialCountCnds[self.prm['currentDifference']][self.currentCondition] = self.trialCountCnds[self.prm['currentDifference']][self.currentCondition] + 1
self.trialCount[self.prm['currentDifference']] = self.trialCount[self.prm['currentDifference']] + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
if self.trialCountAll[self.prm['currentDifference']] > self.prm['nPracticeTrials']:
self.correctCountCnds[self.prm['currentDifference']][self.currentCondition] = self.correctCountCnds[self.prm['currentDifference']][self.currentCondition] + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write( self.currentDifferenceName + '_' + self.stim1 + '-' + self.stim2 + '_' + self.currentCondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentDifferenceName + '_' + self.stim1 + '-' + self.stim2 + '_' + self.currentCondition + '; ' + resp + '; ')
self.fullFileSummLines.append([self.currentDifferenceName + self.prm['pref']["general"]["csvSeparator"] +
self.stim1 + self.prm['pref']["general"]["csvSeparator"] +
self.stim2 + self.prm['pref']["general"]["csvSeparator"] +
self.currentCondition + self.prm['pref']["general"]["csvSeparator"] +
resp + self.prm['pref']["general"]["csvSeparator"]])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]) + self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
cnt = 0
for j in range(self.prm['nDifferences']):
cnt = cnt + self.trialCountAll[j]
pcDone = cnt / ((self.prm['nTrials']+self.prm['nPracticeTrials']) *self.prm['nDifferences']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount[self.prm['currentDifference']] == self.prm['nTrials']:
self.prm['differenceChoices'].remove(self.currentDifferenceName)
# print('Trial Count:', self.trialCount)
# print('Trial Count All:', self.trialCountAll)
# print('Difference Choices:', self.prm['differenceChoices'])
# print(self.currentDifferenceName)
if len(self.prm['differenceChoices']) == 0:
totalCorrectCount = {}
for j in range(self.prm['nDifferences']):
totalCorrectCount[j] = 0
for i in range(len(self.prm['conditions'])):
totalCorrectCount[j] = totalCorrectCount[j] + self.correctCountCnds[j][self.prm['conditions'][i]]
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
self.fullFile.flush()
self.fullFileLog.flush()
A_correct = {}; A_total = {}; B_correct = {}; B_total = {}
dp_IO = {}; dp_diff = {}
for j in range(self.prm['nDifferences']):
A_correct[j] = self.correctCountCnds[j][self.prm['conditions'][0]]
A_total[j] = self.trialCountCnds[j][self.prm['conditions'][0]]
B_correct[j] = self.correctCountCnds[j][self.prm['conditions'][1]]
B_total[j] = self.trialCountCnds[j][self.prm['conditions'][1]]
try:
dp_IO[j] = dprime_SD_from_counts(nCA=A_correct[j], nTA=A_total[j], nCB=B_correct[j], nTB=B_total[j], meth='IO', corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_IO[j] = nan
try:
dp_diff[j] = dprime_SD_from_counts(nCA=A_correct[j], nTA=A_total[j], nCB=B_correct[j], nTB=B_total[j], meth='diff', corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_diff[j] = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write("DIFFERENCE: " + self.prm['differenceNames'][j] + '\n')
ftyp.write('No. Correct = %d\n' %(totalCorrectCount[j]))
ftyp.write('No. Total = %d\n' %(self.trialCount[j]))
ftyp.write('Percent Correct = %5.2f \n' %(totalCorrectCount[j]/self.trialCount[j]*100))
ftyp.write("d-prime IO = %5.3f \n" %(dp_IO[j]))
ftyp.write("d-prime diff = %5.3f \n\n" %(dp_diff[j]))
for i in range(len(self.prm['conditions'])):
try:
thisPercentCorrect = (self.correctCountCnds[j][self.prm['conditions'][i]]*100)/self.trialCountCnds[j][self.prm['conditions'][i]]
except:
thisPercentCorrect = nan
ftyp.write('No. Correct Condition %s = %d\n' %(self.prm['conditions'][i], self.correctCountCnds[j][self.prm['conditions'][i]]))
ftyp.write('No. Total Condition %s = %d \n' %(self.prm['conditions'][i], self.trialCountCnds[j][self.prm['conditions'][i]]))
ftyp.write('Percent Correct Condition %s = %5.2f \n' %(self.prm['conditions'][i], thisPercentCorrect))
ftyp.write('\n\n')
ftyp.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
#'dprime condition listener session experimentLabel nCorrectA nTotalA nCorrectB nTotalB nCorrect nTotal date time duration block experiment'
resLineToWrite = ''
for j in range(self.prm['nDifferences']):
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp_IO[j]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp_diff[j]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.trialCount[j]) + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
resLineToWrite = resLineToWrite + str(self.correctCountCnds[j][self.prm['conditions'][i]]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.trialCountCnds[j][self.prm['conditions'][i]]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Multiple Constants 1-Pair Same/Different', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('Multiple Constants 1-Pair Same/Different', resLineToWriteSummFull)
self.atBlockEnd()
else: #block is not finished, move on to next trial
self.doTrial()
def sortResponseMultipleConstantsABX(self, buttonClicked):
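        #Score the listener's response in an ABX task run at multiple stimulus differences.
        #Bookkeeping mirrors the multiple-constants same/different procedure above, but d'
        #is computed with dprime_ABX_from_counts ('IO' and 'diff' models).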
if self.prm['startOfBlock'] == True:
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.fullFileSummLines = []
self.trialCount = {} #trial count by difference, excluding practice trials
self.trialCountCnds = {} #trial count by difference and condition, excluding practice trials
self.correctCountCnds = {}
            self.trialCountAll = {} #this also includes the practice trials
for j in range(self.prm['nDifferences']):
self.trialCount[j] = 0
self.trialCountCnds[j] = {}
self.correctCountCnds[j] = {}
for i in range(len(self.prm['conditions'])):
self.trialCountCnds[j][self.prm['conditions'][i]] = 0
self.correctCountCnds[j][self.prm['conditions'][i]] = 0
self.trialCountAll[j] = 0
self.currentDifferenceName = self.prm['differenceNames'][self.prm['currentDifference']]
self.trialCountAll[self.prm['currentDifference']] = self.trialCountAll[self.prm['currentDifference']] + 1
if self.trialCountAll[self.prm['currentDifference']] > self.prm['nPracticeTrials']:
self.trialCountCnds[self.prm['currentDifference']][self.currentCondition] = self.trialCountCnds[self.prm['currentDifference']][self.currentCondition] + 1
self.trialCount[self.prm['currentDifference']] = self.trialCount[self.prm['currentDifference']] + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('correct')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
if self.trialCountAll[self.prm['currentDifference']] > self.prm['nPracticeTrials']:
self.correctCountCnds[self.prm['currentDifference']][self.currentCondition] = self.correctCountCnds[self.prm['currentDifference']][self.currentCondition] + 1
resp = '1'
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback('incorrect')
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback('neutral')
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback('off')
resp = '0'
self.fullFileLog.write( self.currentDifferenceName + '_' + self.stim1 + '-' + self.stim2 + '_' + self.stim3 + '_' + self.currentCondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentDifferenceName + '_' + self.stim1 + '-' + self.stim2 + '_' + self.stim3 + '_' + self.currentCondition + '; ' + resp + '; ')
self.fullFileSummLines.append([self.currentDifferenceName + self.prm['pref']["general"]["csvSeparator"] +
self.stim1 + self.prm['pref']["general"]["csvSeparator"] +
self.stim2 + self.prm['pref']["general"]["csvSeparator"] +
self.stim3 + self.prm['pref']["general"]["csvSeparator"] +
self.currentCondition + self.prm['pref']["general"]["csvSeparator"] +
resp + self.prm['pref']["general"]["csvSeparator"]])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]) + self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
cnt = 0
for j in range(self.prm['nDifferences']):
cnt = cnt + self.trialCountAll[j]
pcDone = cnt / ((self.prm['nTrials']+self.prm['nPracticeTrials']) *self.prm['nDifferences']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount[self.prm['currentDifference']] == self.prm['nTrials']:
self.prm['differenceChoices'].remove(self.currentDifferenceName)
if len(self.prm['differenceChoices']) == 0:
totalCorrectCount = {}
for j in range(self.prm['nDifferences']):
totalCorrectCount[j] = 0
for i in range(len(self.prm['conditions'])):
totalCorrectCount[j] = totalCorrectCount[j] + self.correctCountCnds[j][self.prm['conditions'][i]]
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
self.fullFile.flush()
self.fullFileLog.flush()
A_correct = {}; A_total = {}; B_correct = {}; B_total = {}
dp_IO = {}; dp_diff = {}
for j in range(self.prm['nDifferences']):
A_correct[j] = self.correctCountCnds[j][self.prm['conditions'][0]]
A_total[j] = self.trialCountCnds[j][self.prm['conditions'][0]]
B_correct[j] = self.correctCountCnds[j][self.prm['conditions'][1]]
B_total[j] = self.trialCountCnds[j][self.prm['conditions'][1]]
try:
dp_IO[j] = dprime_ABX_from_counts(nCA=A_correct[j], nTA=A_total[j], nCB=B_correct[j], nTB=B_total[j], meth='IO', corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_IO[j] = nan
try:
dp_diff[j] = dprime_ABX_from_counts(nCA=A_correct[j], nTA=A_total[j], nCB=B_correct[j], nTB=B_total[j], meth='diff', corr=self.prm['pref']['general']['dprimeCorrection'])
except:
dp_diff[j] = nan
for ftyp in [self.resFile, self.resFileLog]:
ftyp.write("DIFFERENCE: " + self.prm['differenceNames'][j] + '\n')
ftyp.write('No. Correct = %d\n' %(totalCorrectCount[j]))
ftyp.write('No. Total = %d\n' %(self.trialCount[j]))
ftyp.write('Percent Correct = %5.2f \n' %(totalCorrectCount[j]/self.trialCount[j]*100))
ftyp.write("d-prime IO = %5.3f \n" %(dp_IO[j]))
ftyp.write("d-prime diff = %5.3f \n\n" %(dp_diff[j]))
for i in range(len(self.prm['conditions'])):
try:
thisPercentCorrect = (self.correctCountCnds[j][self.prm['conditions'][i]]*100)/self.trialCountCnds[j][self.prm['conditions'][i]]
except:
thisPercentCorrect = nan
ftyp.write('No. Correct Condition %s = %d\n' %(self.prm['conditions'][i], self.correctCountCnds[j][self.prm['conditions'][i]]))
ftyp.write('No. Total Condition %s = %d \n' %(self.prm['conditions'][i], self.trialCountCnds[j][self.prm['conditions'][i]]))
ftyp.write('Percent Correct Condition %s = %5.2f \n' %(self.prm['conditions'][i], thisPercentCorrect))
ftyp.write('\n\n')
ftyp.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
#'dprime condition listener session experimentLabel nCorrectA nTotalA nCorrectB nTotalB nCorrect nTotal date time duration block experiment'
resLineToWrite = ''
for j in range(self.prm['nDifferences']):
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp_IO[j]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + '{0:5.3f}'.format(dp_diff[j]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.trialCount[j]) + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
resLineToWrite = resLineToWrite + str(self.correctCountCnds[j][self.prm['conditions'][i]]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.trialCountCnds[j][self.prm['conditions'][i]]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Multiple Constants ABX', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('Multiple Constants ABX', resLineToWriteSummFull)
self.atBlockEnd()
else: #block is not finished, move on to next trial
self.doTrial()
def sortResponsePEST(self, buttonClicked):
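        #Score the listener's response and update the track of the PEST
        #(Parameter Estimation by Sequential Testing) adaptive procedure.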
        #PEST SUPPORT IS EXPERIMENTAL; THE PROCEDURE HAS RECEIVED VERY LITTLE TESTING!
if self.prm['startOfBlock'] == True:
self.prm['correctCount'] = 0
self.prm['startOfBlock'] = False
self.prm['currStepSize'] = copy.copy(self.prm['initialStepSize'])
self.prm['nTrialsCurrLev'] = 0
self.prm['nSteps'] = 0
self.prm['lastStepDoubled'] = False
self.prm['stepBeforeLastReversalDoubled'] = False
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
if self.prm['corrTrackDir'] == self.tr("Down"):
self.prm['corrTrackSign'] = -1
self.prm['incorrTrackSign'] = 1
self.prm['incorrTrackDir'] = self.tr("Up")
else:
self.prm['corrTrackSign'] = 1
self.prm['incorrTrackSign'] = -1
self.prm['incorrTrackDir'] = self.tr("Down")
self.fullFileLines = []
self.fullFileSummLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
#increment number of trials
self.prm['nTrialsCurrLev'] = self.prm['nTrialsCurrLev'] +1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('1; ')
self.fullFileLines.append('1; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('1' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['correctCount'] = self.prm['correctCount'] + 1
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm["pref"]["general"]["csvSeparator"]])
self.fullFileLog.write('0; ')
self.fullFileLines.append('0; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('0' + self.prm["pref"]["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm["pref"]["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
        #perform test
        #The expected number of correct responses at threshold is Pt*N,
        #where Pt is the proportion correct tracked (e.g. 0.75) and N is the
        #number of trials run at the given level. If the number of correct
        #responses obtained is decidedly larger than the number expected
        #at threshold, the track goes down. If the number of correct
        #responses obtained is decidedly smaller than the number expected
        #at threshold, the track goes up. How much larger or smaller must the
        #number of correct responses be to count as decidedly larger or smaller
        #than the expected number? This is governed by the parameter W, which
        #defines the tolerance limits on the expected number of correct responses
        #at threshold. If W is small, the tolerance limits are narrow and the track
        #moves quickly to another value. If W is large, the tolerance limits are
        #wide and more evidence needs to be collected before moving the track
        #to a different value.
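        #Worked example (illustrative numbers only): with Pt = 0.75 and W = 1, after N = 8
        #trials at the current level the expected number correct is 0.75*8 = 6. If 7 or more
        #responses were correct (>= 6 + 1) the level moves in the direction associated with
        #correct responses; if 5 or fewer were correct (<= 6 - 1) it moves the other way;
        #otherwise more trials are run at the same level.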
expectedNCorrect = self.prm['percentCorrectTracked']/100*self.prm['nTrialsCurrLev']
print('Correct count: ', self.prm['correctCount'])
print('ExpectedNCorrect: ', expectedNCorrect)
newStepSize = copy.copy(self.prm['currStepSize'])#temporary, it will be changed later if necessary
if self.prm['correctCount'] >= expectedNCorrect + self.prm['W']:
print("self.prm['correctCount'] >= expectedNCorrect + self.prm['W']")
if self.prm['trackDir'] == self.prm['incorrTrackDir']: #CALL REVERSAL
self.prm['trackDir'] = copy.copy(self.prm['corrTrackDir'])
newStepSize = self.prm['currStepSize']/2 #halve step size at reversal (Rule 1)
if self.prm['lastStepDoubled'] == True: #(see Rule 3)
self.prm['stepBeforeLastReversalDoubled'] = True
self.prm['lastStepDoubled'] = False #we just reversed so we didn't double step
self.prm['nSteps'] = 1 #re-initialize step counter. Should this be 1?
elif self.prm['trackDir'] == self.prm['corrTrackDir']:
self.prm['nSteps'] = self.prm['nSteps'] + 1
if self.prm['nSteps'] < 3:
self.prm['lastStepDoubled'] = False
elif self.prm['nSteps'] == 3:
if self.prm['stepBeforeLastReversalDoubled'] == False:
newStepSize = self.prm['currStepSize']*2
self.prm['lastStepDoubled'] = True
else:
self.prm['lastStepDoubled'] = False
elif self.prm['nSteps'] > 3:
newStepSize = self.prm['currStepSize']*2
self.prm['lastStepDoubled'] = True
#limit maximum step size
if newStepSize > self.prm['maxStepSize']:
newStepSize = self.prm['maxStepSize']
self.prm['nTrialsCurrLev'] = 0
self.prm['correctCount'] = 0
self.prm['currStepSize'] = newStepSize
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (self.prm['currStepSize']*self.prm['corrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (self.prm['currStepSize']**self.prm['corrTrackSign'])
elif self.prm['correctCount'] <= expectedNCorrect - self.prm['W']:
print("self.prm['correctCount'] <= expectedNCorrect - self.prm['W']")
if self.prm['trackDir'] == self.prm['corrTrackDir']: #CALL REVERSAL
self.prm['trackDir'] = copy.copy(self.prm['incorrTrackDir'])
newStepSize = self.prm['currStepSize']/2 #halve step size at reversal
if self.prm['lastStepDoubled'] == True:
self.prm['stepBeforeLastReversalDoubled'] = True
self.prm['lastStepDoubled'] = False
self.prm['nSteps'] = 1 #re-initialize counter. Should this be 1?
elif self.prm['trackDir'] == self.prm['incorrTrackDir']:
self.prm['nSteps'] = self.prm['nSteps'] + 1
if self.prm['nSteps'] < 3:
self.prm['lastStepDoubled'] = False
elif self.prm['nSteps'] == 3:
if self.prm['stepBeforeLastReversalDoubled'] == False:
newStepSize = self.prm['currStepSize']*2
self.prm['lastStepDoubled'] = True
else:
self.prm['lastStepDoubled'] = False
elif self.prm['nSteps'] > 3:
newStepSize = self.prm['currStepSize']*2
self.prm['lastStepDoubled'] = True
#limit maximum step size
if newStepSize > self.prm['maxStepSize']:
newStepSize = self.prm['maxStepSize']
self.prm['nTrialsCurrLev'] = 0
self.prm['correctCount'] = 0
self.prm['currStepSize'] = newStepSize
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] + (self.prm['currStepSize']*self.prm['incorrTrackSign'])
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.prm['adaptiveParam'] = self.prm['adaptiveParam'] * (self.prm['currStepSize']**self.prm['incorrTrackSign'])
print("Adaptive Difference")
print(self.prm['adaptiveParam'])
print("Current step: ")
print(self.prm['currStepSize'])
print("nSteps")
print(self.prm['nSteps'])
self.fullFileLog.flush()
pcDone = 0#(self.prm['nTurnpoints'] / self.prm['totalTurnpoints']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.prm['currStepSize'] < self.prm['minStepSize']:
self.writeResultsHeader('standard')
#process results
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
if self.prm['adaptiveType'] == self.tr("Arithmetic"):
self.resFile.write('\n\n')
self.resFile.write('Threshold = %5.2f \n' %(self.prm['adaptiveParam']))
self.resFileLog.write('\n\n')
self.resFileLog.write('Threshold = %5.2f \n' %(self.prm['adaptiveParam']))
elif self.prm['adaptiveType'] == self.tr("Geometric"):
self.resFile.write('\n\n')
self.resFile.write('Geometric Threshold = %5.2f \n' %(self.prm['adaptiveParam']))
self.resFileLog.write('\n\n')
self.resFileLog.write('Geometric Threshold = %5.2f \n' %(self.prm['adaptiveParam']))
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('PEST', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm["pref"]["general"]["csvSeparator"] + \
durString + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm["pref"]["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm["pref"]["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm["pref"]["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('PEST', resLineToWriteSummFull)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseMaximumLikelihood(self, buttonClicked):
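        #Score the listener's response and update the maximum-likelihood adaptive track:
        #the next stimulus level is chosen from the logistic psychometric function that is
        #currently most likely given all responses collected so far.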
if self.prm['startOfBlock'] == True:
if self.prm['psyFunLogScale'] == "Yes":
scl = "log"
elif self.prm['psyFunLogScale'] == "No":
scl = "linear"
self.prm['MLMidPointGrid'] = stimSpacingGrid(self.prm['psyFunLoMidPoint'], self.prm['psyFunHiMidPoint'], self.prm['psyFunMidPointStep'], scl)
self.prm['MLLikelihood'] = numpy.zeros(len(self.prm['MLMidPointGrid']))
self.prm['startOfBlock'] = False
self.prm['MLStimLevels'] = []
self.prm['MLResponses'] = []
self.trialCount = 0
self.fullFileLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
currBlock = 'b' + str(self.prm['currentBlock'])
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
if buttonClicked == self.correctButton:
self.prm['MLResponses'].append(1)
response = 1
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
elif buttonClicked != self.correctButton:
self.prm['MLResponses'].append(0)
response = 0
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLog.write(str(response)+'; ')
self.fullFileLines.append(str(response)+'; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.prm['MLStimLevels'].append(self.prm['adaptiveParam'])
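        # Update the midpoint likelihoods with this trial's outcome and pick the next level
        # from the most likely psychometric function; on a log scale the same computation
        # runs on log-transformed levels and the result is mapped back with exp().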
if self.prm['psyFunLogScale'] == "No":
ll = logisticLikelihood(self.prm['adaptiveParam'], response, self.prm['MLMidPointGrid'],
self.prm['psyFunSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm['psyFunLapseRate'])
self.prm['MLLikelihood'] = self.prm['MLLikelihood'] + ll
mlIdx = numpy.where( self.prm['MLLikelihood']==max(self.prm['MLLikelihood']))[0]
self.prm['adaptiveParam'] = invLogistic(self.prm['percentCorrectTracked']/100,
self.prm['MLMidPointGrid'][mlIdx],
self.prm['psyFunSlope'], 1/self.prm[currBlock]['nAlternatives'],
self.prm['psyFunLapseRate'])[0]
elif self.prm['psyFunLogScale'] == "Yes":
ll = logisticLikelihood(log(self.prm['adaptiveParam']), response, log(self.prm['MLMidPointGrid']),
exp(self.prm['psyFunSlope']), 1/self.prm[currBlock]['nAlternatives'],
self.prm['psyFunLapseRate'])
self.prm['MLLikelihood'] = self.prm['MLLikelihood'] + ll
mlIdx = numpy.where( self.prm['MLLikelihood']==max(self.prm['MLLikelihood']))[0]
self.prm['adaptiveParam'] = exp(invLogistic(self.prm['percentCorrectTracked']/100,
log(self.prm['MLMidPointGrid'][mlIdx]),
exp(self.prm['psyFunSlope']),
1/self.prm[currBlock]['nAlternatives'],
self.prm['psyFunLapseRate'])[0])
#print(self.prm['adaptiveParam'])
self.trialCount = self.trialCount +1
self.fullFileLog.flush()
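        # Update the progress gauge: percent of trials done in this block, weighted by the
        # block's position among the stored blocks and by the current repetition.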
pcDone = (self.trialCount / self.prm['nTrials']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount == self.prm['nTrials']:
self.writeResultsHeader('standard')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
for i in range(len(self.prm['MLStimLevels'])):
self.resFile.write('%5.2f ' %self.prm['MLStimLevels'][i])
self.resFileLog.write('%5.2f ' %self.prm['MLStimLevels'][i])
self.resFile.write('\n\n')
self.resFile.write('threshold = %5.2f \n' %(self.prm['MLStimLevels'][-1]))
self.resFileLog.write('\n\n')
self.resFileLog.write('threshold = %5.2f \n' %(self.prm['MLStimLevels'][-1]))
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(self.prm['MLStimLevels'][-1]) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Maximum Likelihood', resLineToWrite)
self.atBlockEnd()
else:
self.doTrial()
def sortResponsePSI(self, buttonClicked):
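        # PSI adaptive procedure: at the start of a block setupPSI() builds a grid of
        # candidate psychometric-function parameters (midpoint, slope, lapse rate) with the
        # requested priors; each subsequent response is fed to PSI_update(), which also
        # returns the next stimulus level ('xnextLinear').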
currBlock = 'b' + str(self.prm['currentBlock'])
if self.prm['startOfBlock'] == True:
self.fullFileLines = []
self.fullFileSummLines = []
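            # Collect the parameter dimensions to marginalize over when deriving estimates:
            # 0 = midpoint, 1 = slope, 2 = lapse rate (None means no marginalization).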
if self.prm['margThresh'] == "Yes" or self.prm['margSlope'] == "Yes" or self.prm['margLapse'] == "Yes":
ax = np.array([])
if self.prm['margThresh'] == "Yes":
ax = numpy.append(ax, 0)
if self.prm['margSlope'] == "Yes":
ax = numpy.append(ax, 1)
if self.prm['margLapse'] == "Yes":
ax = numpy.append(ax, 2)
ax = tuple(np.sort(ax))
else:
ax = None
gammax = 1/self.prm[currBlock]['nAlternatives']
if self.prm['stimScale'] == "Linear":
self.PSI = setupPSI(model=self.prm['psyFunType'],
x0=self.prm['adaptiveParam'],
xLim=(self.prm['stimLo'], self.prm['stimHi']),
xStep=self.prm['stimStep'],
stimScale=self.prm['stimScale'],
alphaLim=(self.prm['loMidPoint'], self.prm['hiMidPoint']),
alphaStep=self.prm['midPointStep'],
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=self.prm['midPointPriorMu'],
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'],self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gamma=gammax,
lambdaLim=(self.prm['loLapse'],self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
marginalize = ax)
elif self.prm['stimScale'] == "Logarithmic":
self.PSI = setupPSI(model=self.prm['psyFunType'],
x0=abs(self.prm['adaptiveParam']),
xLim=(abs(self.prm['stimLo']), abs(self.prm['stimHi'])),
xStep=self.prm['stimStep'],
stimScale=self.prm['stimScale'],
alphaLim=(abs(self.prm['loMidPoint']), abs(self.prm['hiMidPoint'])),
alphaStep=self.prm['midPointStep'],
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=abs(self.prm['midPointPriorMu']),
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'],self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gamma=gammax,
lambdaLim=(self.prm['loLapse'],self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
marginalize = ax)
self.prm['startOfBlock'] = False
self.trialCount = 0
self.fullFileLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
if buttonClicked == self.correctButton:
response = 1
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
elif buttonClicked != self.correctButton:
response = 0
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"]])
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLog.write(str(response)+'; ')
self.fullFileLines.append(str(response)+'; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(response) + self.prm['pref']["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.trialCount = self.trialCount +1
self.fullFileLog.flush()
pcDone = (self.trialCount / self.prm['nTrials']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount == self.prm['nTrials']:
self.writeResultsHeader('standard')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
self.resFile.write('Midpoint = %5.3f ' %self.PSI['est_midpoint'])
self.resFileLog.write('Midpoint = %5.3f ' %self.PSI['est_midpoint'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Slope = %5.3f ' %self.PSI['est_slope'])
self.resFileLog.write('Slope = %5.3f ' %self.PSI['est_slope'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Lapse = %5.3f ' %self.PSI['est_lapse'])
self.resFileLog.write('Lapse = %5.3f ' %self.PSI['est_lapse'])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(self.PSI['est_midpoint']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.PSI['est_slope']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.PSI['est_lapse']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:d}'.format(int(self.prm["nTrials"])) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('PSI', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('PSI', resLineToWriteSummFull)
del self.PSI #clear memory
self.atBlockEnd()
else:
self.PSI = PSI_update(self.PSI, response)
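                # PSI_update() returns the next stimulus in linear units ('xnextLinear'); for
                # logarithmic tracks the sign of the adaptive parameter is preserved because
                # only its absolute value was handed to the PSI grid.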
if self.prm['stimScale'] == "Logarithmic":
if self.prm['adaptiveParam'] >=0:
self.prm['adaptiveParam'] = self.PSI["xnextLinear"]
else:
self.prm['adaptiveParam'] = -self.PSI["xnextLinear"]
else:
self.prm['adaptiveParam'] = self.PSI["xnextLinear"]
# print("Est. thresh: " + str(self.PSI['est_midpoint']))
# print('Next Stim: ' + str(self.prm['adaptiveParam']))
# print(self.PSI["phi"])
self.doTrial()
#PSI Est. Guess Rate
def sortResponsePSIEstGuessRate(self, buttonClicked):
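        # PSI variant for yes/no tasks: the guess rate (gamma) is estimated as a free
        # parameter instead of being fixed at 1/nAlternatives, and the response is scored
        # as "yes"/"no" rather than correct/incorrect.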
currBlock = 'b' + str(self.prm['currentBlock'])
if self.prm['startOfBlock'] == True:
self.fullFileLines = []
self.fullFileSummLines = []
if self.prm['margThresh'] == "Yes" or self.prm['margSlope'] == "Yes" or self.prm['margGuess'] == "Yes" or self.prm['margLapse'] == "Yes":
ax = np.array([])
if self.prm['margThresh'] == "Yes":
ax = numpy.append(ax, 0)
if self.prm['margSlope'] == "Yes":
ax = numpy.append(ax, 1)
if self.prm['margGuess'] == "Yes":
ax = numpy.append(ax, 2)
if self.prm['margLapse'] == "Yes":
ax = numpy.append(ax, 3)
ax = tuple(np.sort(ax))
else:
ax = None
if self.prm['stimScale'] == "Linear":
self.PSI = setupPSIEstGuessRate(model=self.prm['psyFunType'],
x0=self.prm['adaptiveParam'],
xLim=(self.prm['stimLo'], self.prm['stimHi']),
xStep=self.prm['stimStep'],
stimScale=self.prm['stimScale'],
alphaLim=(self.prm['loMidPoint'], self.prm['hiMidPoint']),
alphaStep=self.prm['midPointStep'],
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=self.prm['midPointPriorMu'],
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'],self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gammaLim=(self.prm['loGuess'], self.prm['hiGuess']),
gammaStep=self.prm['guessStep'],
gammaSpacing=self.prm['guessSpacing'],
gammaDist=self.prm['guessPrior'],
gammaMu=self.prm['guessPriorMu'],
gammaSTD=self.prm['guessPriorSTD'],
lambdaLim=(self.prm['loLapse'],self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
marginalize = ax)
elif self.prm['stimScale'] == "Logarithmic":
self.PSI = setupPSIEstGuessRate(model=self.prm['psyFunType'],
x0=abs(self.prm['adaptiveParam']),
xLim=(abs(self.prm['stimLo']), abs(self.prm['stimHi'])),
xStep=self.prm['stimStep'],
stimScale=self.prm['stimScale'],
alphaLim=(abs(self.prm['loMidPoint']), abs(self.prm['hiMidPoint'])),
alphaStep=self.prm['midPointStep'],
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=abs(self.prm['midPointPriorMu']),
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'],self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gammaLim=(self.prm['loGuess'], self.prm['hiGuess']),
gammaStep=self.prm['guessStep'],
gammaSpacing=self.prm['guessSpacing'],
gammaDist=self.prm['guessPrior'],
gammaMu=self.prm['guessPriorMu'],
gammaSTD=self.prm['guessPriorSTD'],
lambdaLim=(self.prm['loLapse'],self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
marginalize = ax)
self.prm['startOfBlock'] = False
self.trialCount = 0
self.fullFileLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
if buttonClicked == self.prm["YesButton"]:
            response = 1 # here "response" indicates whether the listener said "Yes" (or the equivalent), not whether the answer was correct
else:
response = 0
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"]])
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLog.write(str(response)+'; ')
self.fullFileLines.append(str(response)+'; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(response) + self.prm['pref']["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.trialCount = self.trialCount +1
self.fullFileLog.flush()
pcDone = (self.trialCount / self.prm['nTrials']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount == self.prm['nTrials']:
self.writeResultsHeader('standard')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
self.resFile.write('Midpoint = %5.3f ' %self.PSI['est_midpoint'])
self.resFileLog.write('Midpoint = %5.3f ' %self.PSI['est_midpoint'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Slope = %5.3f ' %self.PSI['est_slope'])
self.resFileLog.write('Slope = %5.3f ' %self.PSI['est_slope'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Guess = %5.3f ' %self.PSI['est_guess'])
self.resFileLog.write('Guess = %5.3f ' %self.PSI['est_guess'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Lapse = %5.3f ' %self.PSI['est_lapse'])
self.resFileLog.write('Lapse = %5.3f ' %self.PSI['est_lapse'])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(self.PSI['est_midpoint']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.PSI['est_guess']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.PSI['est_slope']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.PSI['est_lapse']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:d}'.format(int(self.prm["nTrials"])) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('PSI - Est. Guess Rate', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('PSI - Est. Guess Rate', resLineToWriteSummFull)
del self.PSI #clear memory
self.atBlockEnd()
else:
self.PSI = PSIEstGuessRate_update(self.PSI, response)
if self.prm['stimScale'] == "Logarithmic":
if self.prm['adaptiveParam'] >=0:
self.prm['adaptiveParam'] = self.PSI["xnextLinear"]
else:
self.prm['adaptiveParam'] = -self.PSI["xnextLinear"]
else:
self.prm['adaptiveParam'] = self.PSI["xnextLinear"]
# print("Est. thresh: " + str(self.PSI['est_midpoint']))
# print('Next Stim: ' + str(self.prm['adaptiveParam']))
# print(self.PSI["phi"])
self.doTrial()
def sortResponseUML(self, buttonClicked):
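        # UML adaptive procedure: setupUML() builds grids for midpoint, slope and lapse
        # rate with the requested priors and sweet-point rule ('swptRule'); UML_update()
        # scores each response and returns the next stimulus level.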
currBlock = 'b' + str(self.prm['currentBlock'])
if self.prm['startOfBlock'] == True:
self.fullFileLines = []
self.fullFileSummLines = []
gammax = 1/self.prm[currBlock]['nAlternatives']
if self.prm['stimScale'] == "Linear":
self.UML = setupUML(model=self.prm['psyFunType'],
swptRule=self.prm['swptRule'],
nDown=self.prm["numberCorrectNeeded"],
centTend = self.prm["psyFunPosteriorSummary"],
stimScale = self.prm['stimScale'],
x0=self.prm['adaptiveParam'],
xLim=(self.prm['stimLo'], self.prm['stimHi']),
alphaLim=(self.prm['loMidPoint'], self.prm['hiMidPoint']),
alphaStep=self.prm['midPointStep'],
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=self.prm['midPointPriorMu'],
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'], self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gamma=gammax,
lambdaLim=(self.prm['loLapse'], self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
suggestedLambdaSwpt=self.prm['suggestedLambdaSwpt'],
lambdaSwptPC=self.prm['lambdaSwptPC'])
elif self.prm['stimScale'] == "Logarithmic":
self.UML = setupUML(model=self.prm['psyFunType'],
swptRule=self.prm['swptRule'],
nDown=self.prm["numberCorrectNeeded"],
centTend = self.prm["psyFunPosteriorSummary"],
stimScale = self.prm['stimScale'],
x0=abs(self.prm['adaptiveParam']),
xLim=(abs(self.prm['stimLo']), abs(self.prm['stimHi'])),
alphaLim=(abs(self.prm['loMidPoint']), abs(self.prm['hiMidPoint'])),
alphaStep=abs(self.prm['midPointStep']),
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=self.prm['midPointPriorMu'],
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'], self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gamma=gammax,
lambdaLim=(self.prm['loLapse'], self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
suggestedLambdaSwpt=abs(self.prm['suggestedLambdaSwpt']),
lambdaSwptPC=self.prm['lambdaSwptPC'])
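            # Optionally restore the UML state (the "p" field) saved at the end of a
            # previous block with the same condition label, so estimation carries on
            # across blocks.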
if self.prm["saveUMLState"] == True:
try:
self.UML["p"] = np.load(os.path.dirname(self.prm['resultsFile'])+self.prm[currBlock]['conditionLabel']+".npy")
print("Previous block state loaded")
except:
print("Previous block state could not be loaded")
pass
self.prm['startOfBlock'] = False
self.trialCount = 0
self.fullFileLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
if buttonClicked == self.correctButton:
response = 1
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
elif buttonClicked != self.correctButton:
response = 0
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"]])
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLog.write(str(response)+'; ')
self.fullFileLines.append(str(response)+'; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(response) + self.prm['pref']["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.trialCount = self.trialCount +1
self.fullFileLog.flush()
pcDone = (self.trialCount / self.prm['nTrials']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount == self.prm['nTrials']:
self.writeResultsHeader('standard')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
self.resFile.write('Midpoint = %5.3f ' %self.UML['est_midpoint'])
self.resFileLog.write('Midpoint = %5.3f ' %self.UML['est_midpoint'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Slope = %5.3f ' %self.UML['est_slope'])
self.resFileLog.write('Slope = %5.3f ' %self.UML['est_slope'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Lapse = %5.3f ' %self.UML['est_lapse'])
self.resFileLog.write('Lapse = %5.3f ' %self.UML['est_lapse'])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(self.UML['est_midpoint']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.UML['est_slope']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.UML['est_lapse']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:d}'.format(int(self.prm["nTrials"])) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('UML', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('UML', resLineToWriteSummFull)
if self.prm["saveUMLState"] == True:
# if int("".join(np.__version__.split("."))) >=182:
# np.save(os.path.dirname(self.prm['resultsFile'])+self.prm[currBlock]['conditionLabel']+".npy", self.UML["p"], allow_pickle=False, fix_imports=False)
# else:
                np.save(os.path.join(os.path.dirname(self.prm['resultsFile']), self.prm[currBlock]['conditionLabel']+".npy"), self.UML["p"])#, allow_pickle=False, fix_imports=False)
del self.UML #clear memory
self.atBlockEnd()
else:
self.UML = UML_update(self.UML, response)
if self.prm['stimScale'] == "Logarithmic":
if self.prm['adaptiveParam'] >=0:
self.prm['adaptiveParam'] = self.UML["xnextLinear"]
else:
self.prm['adaptiveParam'] = -self.UML["xnextLinear"]
else:
self.prm['adaptiveParam'] = self.UML["xnextLinear"]
# print("Est. thresh: " + str(self.UML['est_midpoint']))
# print('Next Stim: ' + str(self.prm['adaptiveParam']))
# print(self.UML["phi"])
self.doTrial()
def sortResponseUMLEstGuessRate(self, buttonClicked):
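        # UML variant for yes/no tasks: the guess rate (gamma) is estimated together with
        # midpoint, slope and lapse rate, and the response is scored as "yes"/"no".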
currBlock = 'b' + str(self.prm['currentBlock'])
if self.prm['startOfBlock'] == True:
self.fullFileLines = []
self.fullFileSummLines = []
if self.prm['stimScale'] == "Linear":
self.UML = setupUMLEstGuessRate(model=self.prm['psyFunType'],
swptRule=self.prm['swptRule'],
nDown=self.prm["numberCorrectNeeded"],
centTend = self.prm["psyFunPosteriorSummary"],
stimScale = self.prm['stimScale'],
x0=self.prm['adaptiveParam'],
xLim=(self.prm['stimLo'], self.prm['stimHi']),
alphaLim=(self.prm['loMidPoint'], self.prm['hiMidPoint']),
alphaStep=self.prm['midPointStep'],
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=self.prm['midPointPriorMu'],
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'], self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gammaLim=(self.prm['loGuess'], self.prm['hiGuess']),
gammaStep=self.prm['guessStep'],
gammaSpacing=self.prm['guessSpacing'],
gammaDist=self.prm['guessPrior'],
gammaMu=self.prm['guessPriorMu'],
gammaSTD=self.prm['guessPriorSTD'],
lambdaLim=(self.prm['loLapse'], self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
suggestedLambdaSwpt=self.prm['suggestedLambdaSwpt'],
lambdaSwptPC=self.prm['lambdaSwptPC'])
elif self.prm['stimScale'] == "Logarithmic":
self.UML = setupUMLEstGuessRate(model=self.prm['psyFunType'],
swptRule=self.prm['swptRule'],
nDown=self.prm["numberCorrectNeeded"],
centTend = self.prm["psyFunPosteriorSummary"],
stimScale = self.prm['stimScale'],
x0=abs(self.prm['adaptiveParam']),
xLim=(abs(self.prm['stimLo']), abs(self.prm['stimHi'])),
alphaLim=(abs(self.prm['loMidPoint']), abs(self.prm['hiMidPoint'])),
alphaStep=abs(self.prm['midPointStep']),
alphaSpacing="Linear",
alphaDist=self.prm['midPointPrior'],
alphaMu=self.prm['midPointPriorMu'],
alphaSTD=self.prm['midPointPriorSTD'],
betaLim=(self.prm['loSlope'], self.prm['hiSlope']),
betaStep=self.prm['slopeStep'],
betaSpacing=self.prm['slopeSpacing'],
betaDist=self.prm['slopePrior'],
betaMu=self.prm['slopePriorMu'],
betaSTD=self.prm['slopePriorSTD'],
gammaLim=(self.prm['loGuess'], self.prm['hiGuess']),
gammaStep=self.prm['guessStep'],
gammaSpacing=self.prm['guessSpacing'],
gammaDist=self.prm['guessPrior'],
gammaMu=self.prm['guessPriorMu'],
gammaSTD=self.prm['guessPriorSTD'],
lambdaLim=(self.prm['loLapse'], self.prm['hiLapse']),
lambdaStep=self.prm['lapseStep'],
lambdaSpacing=self.prm['lapseSpacing'],
lambdaDist=self.prm['lapsePrior'],
lambdaMu=self.prm['lapsePriorMu'],
lambdaSTD=self.prm['lapsePriorSTD'],
suggestedLambdaSwpt=abs(self.prm['suggestedLambdaSwpt']),
lambdaSwptPC=self.prm['lambdaSwptPC'])
self.prm['startOfBlock'] = False
self.trialCount = 0
self.fullFileLines = []
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] + 1
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
elif buttonClicked != self.correctButton:
response = 0
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
if buttonClicked == self.prm["YesButton"]:
            response = 1 # here "response" indicates whether the listener said "Yes" (or the equivalent), not whether the answer was correct
else:
response = 0
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"]])
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLog.write(str(response)+'; ')
self.fullFileLines.append(str(response)+'; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(response) + self.prm['pref']["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write(' ;')
self.fullFileLines.append(' ;')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.trialCount = self.trialCount +1
self.fullFileLog.flush()
pcDone = (self.trialCount / self.prm['nTrials']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCount == self.prm['nTrials']:
self.writeResultsHeader('standard')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
self.resFile.write('Midpoint = %5.3f ' %self.UML['est_midpoint'])
self.resFileLog.write('Midpoint = %5.3f ' %self.UML['est_midpoint'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Guess = %5.3f ' %self.UML['est_guess'])
self.resFileLog.write('Guess = %5.3f ' %self.UML['est_guess'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Slope = %5.3f ' %self.UML['est_slope'])
self.resFileLog.write('Slope = %5.3f ' %self.UML['est_slope'])
self.resFile.write('\n')
self.resFileLog.write('\n')
self.resFile.write('Lapse = %5.3f ' %self.UML['est_lapse'])
self.resFileLog.write('Lapse = %5.3f ' %self.UML['est_lapse'])
self.resFile.write('\n\n')
self.resFileLog.write('\n\n')
for i in range(self.prm['nAlternatives']):
self.resFile.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
self.resFileLog.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
self.resFile.write(', ')
self.resFileLog.write(', ')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = '{0:5.3f}'.format(self.UML['est_midpoint']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.UML['est_guess']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.UML['est_slope']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:5.3f}'.format(self.UML['est_lapse']) + self.prm['pref']["general"]["csvSeparator"] + \
'{0:d}'.format(int(self.prm['nTrials'])) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('UML - Est. Guess Rate', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('UML - Est. Guess Rate', resLineToWriteSummFull)
del self.UML #clear memory
self.atBlockEnd()
else:
self.UML = UMLEstGuessRate_update(self.UML, response)
if self.prm['stimScale'] == "Logarithmic":
if self.prm['adaptiveParam'] >=0:
self.prm['adaptiveParam'] = self.UML["xnextLinear"]
else:
self.prm['adaptiveParam'] = -self.UML["xnextLinear"]
else:
self.prm['adaptiveParam'] = self.UML["xnextLinear"]
# print("Est. thresh: " + str(self.UML['est_midpoint']))
# print('Next Stim: ' + str(self.prm['adaptiveParam']))
# print(self.UML["phi"])
self.doTrial()
def sortResponseMultipleConstantsOddOneOut(self, buttonClicked):
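        # Constant-stimuli odd-one-out paradigm with multiple conditions: correct responses
        # are counted only after the practice trials, and at the end of the block each
        # condition is summarized as percent correct and d' from dprime_oddity()
        # ("IO" and "diff" models).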
if self.prm['startOfBlock'] == True: #Initialize counts and data structures
self.prm['startOfBlock'] = False
self.prm['ones'] = 0
self.prm['twos'] = 0
self.prm['threes'] = 0
self.fullFileLines = []
self.fullFileSummLines = []
self.trialCountCnds = {}
self.correctCountCnds = {}
for i in range(self.prm['nDifferences']):
self.trialCountCnds[self.prm['conditions'][i]] = 0
self.correctCountCnds[self.prm['conditions'][i]] = 0
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] +1
self.trialCountCnds[self.currentCondition] = self.trialCountCnds[self.currentCondition] +1
if buttonClicked == self.correctButton:
if self.trialCountCnds[self.currentCondition] > self.prm['nPracticeTrials']:
self.correctCountCnds[self.currentCondition] = self.correctCountCnds[self.currentCondition] +1
resp = 1
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
elif buttonClicked != self.correctButton:
resp = 0
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(self.currentCondition + '; ' + str(resp) + '; ')
self.fullFileLines.append(self.currentCondition + '; ' + str(resp) + '; ')
self.fullFileSummLines.append([self.currentCondition + self.prm['pref']["general"]["csvSeparator"] +
str(resp) + self.prm['pref']["general"]["csvSeparator"]])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]) + self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
cnt = 0
for j in range(self.prm['nDifferences']):
cnt = cnt + self.trialCountCnds[self.prm['conditions'][j]]
pcDone = cnt / ((self.prm['nTrials']+self.prm['nPracticeTrials']) *self.prm['nDifferences']) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCountCnds[self.currentCondition] == self.prm['nTrials']:
self.prm['comparisonChoices'].remove(self.currentCondition)
if len(self.prm['comparisonChoices']) == 0: #Block is completed
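                # Per-condition proportion correct and the two d' estimates; if
                # dprime_oddity() raises an error the estimate is recorded as nan.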
dp_diff = {}; dp_IO = {}
prCorr = {}
for cnd in self.prm['conditions']:
prCorr[cnd] = self.correctCountCnds[cnd] / self.trialCountCnds[cnd]
try:
dp_IO[cnd] = dprime_oddity(prCorr[cnd], meth="IO")
                    except Exception:
dp_IO[cnd] = nan
try:
dp_diff[cnd] = dprime_oddity(prCorr[cnd], meth="diff")
                    except Exception:
dp_diff[cnd] = nan
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
for ftyp in [self.resFile, self.resFileLog]:
for cnd in self.prm['conditions']:
ftyp.write('Condition %s\n\n' %(cnd))
ftyp.write('No. Correct = %d\n' %(self.correctCountCnds[cnd]))
ftyp.write('No. Trials = %d\n' %(self.trialCountCnds[cnd]))
ftyp.write('Percent Correct = %5.3f\n' %(prCorr[cnd]*100))
ftyp.write('d-prime IO = %5.3f\n' %(dp_IO[cnd]))
ftyp.write('d-prime diff = %5.3f\n' %(dp_diff[cnd]))
ftyp.write('\n\n')
for i in range(self.prm['nAlternatives']):
ftyp.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
ftyp.write(', ')
ftyp.write('\n\n')
ftyp.flush()
self.fullFile.flush()
self.fullFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = ""#str(self.prm['nTrials']) + self.prm['pref']["general"]["csvSeparator"]
for cnd in self.prm['conditions']:
resLineToWrite = resLineToWrite + str(self.correctCountCnds[cnd]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.trialCountCnds[cnd]) + self.prm['pref']["general"]["csvSeparator"] + \
str(prCorr[cnd]*100) + self.prm['pref']["general"]["csvSeparator"] + \
str(dp_IO[cnd]) + self.prm['pref']["general"]["csvSeparator"] + \
str(dp_diff[cnd]) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Multiple Constants Odd One Out', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('Multiple Constants Odd One Out', resLineToWriteSummFull)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseMultipleConstantsSoundComparison(self, buttonClicked):
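        # Constant-stimuli sound-comparison paradigm: there is no correct response; after
        # the practice trials the number of times each of the three stimuli was chosen is
        # accumulated per condition and reported as counts and percentages.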
if self.prm['startOfBlock'] == True: #Initialize counts and data structures
self.prm['startOfBlock'] = False
self.prm['ones'] = 0
self.prm['twos'] = 0
self.prm['threes'] = 0
self.fullFileLines = []
self.stimCount = {}
self.trialCountCnds = {}
#self.correctCountCnds = {}
for i in range(self.prm['nDifferences']):
self.stimCount[self.prm['conditions'][i]] = [0,0,0]
self.trialCountCnds[self.prm['conditions'][i]] = 0
#self.correctCountCnds[self.prm['conditions'][i]] = 0
self.prm['buttonCounter'] = [0 for i in range(self.prm['nAlternatives'])]
self.prm['buttonCounter'][buttonClicked-1] = self.prm['buttonCounter'][buttonClicked-1] +1
self.trialCountCnds[self.currentCondition] = self.trialCountCnds[self.currentCondition] +1
if self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
if self.trialCountCnds[self.currentCondition] > self.prm['nPracticeTrials']:
if buttonClicked == 1:
self.stimCount[self.currentCondition][self.prm['currStimOrder'][0]] = self.stimCount[self.currentCondition][self.prm['currStimOrder'][0]]+1
elif buttonClicked == 2:
self.stimCount[self.currentCondition][self.prm['currStimOrder'][1]] = self.stimCount[self.currentCondition][self.prm['currStimOrder'][1]]+1
elif buttonClicked == 3:
self.stimCount[self.currentCondition][self.prm['currStimOrder'][2]] = self.stimCount[self.currentCondition][self.prm['currStimOrder'][2]]+1
resp = str(self.prm['currStimOrder'][buttonClicked-1]+1)
self.fullFileLog.write(self.currentCondition + '; ' + resp + '; ')
self.fullFileLines.append(self.currentCondition + '; ' + resp + '; ')
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileLog.flush()
cnt = 0
for i in range(len(self.prm['conditions'])):
cnt = cnt + self.trialCountCnds[self.prm['conditions'][i]]
        pcDone = cnt / (self.prm['nTrials']*len(self.prm['conditions'])) * 100
bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
self.gauge.setValue(pcTot)
if self.trialCountCnds[self.currentCondition] == self.prm['nTrials']:
self.prm['comparisonChoices'].remove(self.currentCondition)
if len(self.prm['comparisonChoices']) == 0: #Block is completed
self.writeResultsHeader('standard')
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.fullFileLog.write('\n')
self.fullFile.write('\n')
for ftyp in [self.resFile, self.resFileLog]:
for cnd in self.prm['conditions']:
ftyp.write('Condition %s\n\n' %(cnd))
ftyp.write('\n')
ftyp.write('Stimulus 1 = %d/%d; Percent = %5.2f\n' %(self.stimCount[cnd][0], self.prm['nTrials'], self.stimCount[cnd][0]/self.prm['nTrials']*100))
ftyp.write('Stimulus 2 = %d/%d; Percent = %5.2f\n' %(self.stimCount[cnd][1], self.prm['nTrials'], self.stimCount[cnd][1]/self.prm['nTrials']*100))
ftyp.write('Stimulus 3 = %d/%d; Percent = %5.2f\n' %(self.stimCount[cnd][2], self.prm['nTrials'], self.stimCount[cnd][2]/self.prm['nTrials']*100))
ftyp.write('\n\n')
for i in range(self.prm['nAlternatives']):
ftyp.write("B{0} = {1}".format(i+1, self.prm['buttonCounter'][i]))
if i != self.prm['nAlternatives']-1:
ftyp.write(', ')
ftyp.write('\n\n')
ftyp.flush()
self.fullFile.flush()
self.fullFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = str(self.prm['nTrials']) + self.prm['pref']["general"]["csvSeparator"]
for cnd in self.prm['conditions']:
resLineToWrite = resLineToWrite + str(self.stimCount[cnd][0]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.stimCount[cnd][0]/self.prm['nTrials']*100) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.stimCount[cnd][1]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.stimCount[cnd][1]/self.prm['nTrials']*100) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.stimCount[cnd][2]) + self.prm['pref']["general"]["csvSeparator"] + \
str(self.stimCount[cnd][2]/self.prm['nTrials']*100) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Multiple Constants Sound Comparison', resLineToWrite)
self.atBlockEnd()
else:
self.doTrial()
def sortResponseAdaptiveDigitSpan(self, buttonClicked):
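        """Handle a listener response in the adaptive digit span paradigm.

        Each response is logged; two trials are run at each sequence length, and the
        sequence length ('adaptiveParam') is increased if at least one of the two is
        correct, otherwise the block ends and the longest span and span score are written out.
        """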
if self.prm['startOfBlock'] == True:
self.prm['correct'] = []
self.prm['sequenceLength'] = []
self.prm['nTrialsSequence'] = 0
self.prm['startOfBlock'] = False
self.fullFileLines = []
self.fullFileSummLines = []
self.prm['nTrialsSequence'] = self.prm['nTrialsSequence'] +1
self.prm['sequenceLength'].append(self.prm['adaptiveParam'])
if buttonClicked == self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("correct")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"]])
self.fullFileLog.write('1; ')
self.fullFileLines.append('1; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('1' + self.prm['pref']["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
                    self.fullFileLog.write('; ')
                    self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.prm['correct'].append(1)
#self.prm['adaptiveParam'] = self.prm['adaptiveParam']+1
#self.runAnotherTrial = True
elif buttonClicked != self.correctButton:
if self.prm["responseLight"] == self.tr("Feedback"):
self.responseLight.giveFeedback("incorrect")
elif self.prm["responseLight"] == self.tr("Neutral"):
self.responseLight.giveFeedback("neutral")
elif self.prm["responseLight"] == self.tr("None"):
self.responseLight.giveFeedback("off")
self.fullFileLog.write(str(self.prm['adaptiveParam']) + '; ')
self.fullFileLines.append(str(self.prm['adaptiveParam']) + '; ')
self.fullFileSummLines.append([str(self.prm['adaptiveParam']) + self.prm['pref']["general"]["csvSeparator"]])
self.fullFileLog.write('0; ')
self.fullFileLines.append('0; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append('0' + self.prm['pref']["general"]["csvSeparator"])
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write'])):
self.fullFileLog.write(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLines.append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(self.prm['additional_parameters_to_write'][p]))
self.fullFileLog.write('; ')
self.fullFileLines.append('; ')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.prm['correct'].append(0)
#if self.prm['correct'][len(self.prm['correct'])-2] == 1:
# self.runAnotherTrial = True
#elif self.prm['correct'][len(self.prm['correct'])-2] == 0: #got two consecutive incorrect responses
# self.runAnotherTrial = False
self.fullFileLog.write(str(buttonClicked) + '; ')
self.fullFileLines.append(str(buttonClicked) + '; ')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(str(buttonClicked))
self.fullFileSummLines[len(self.fullFileSummLines)-1].append(self.prm['pref']["general"]["csvSeparator"])
self.fullFileLog.flush()
# pcDone = (self.prm['nTurnpoints'] / self.prm['totalTurnpoints']) * 100
# bp = int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'])
# pcThisRep = (bp-1) / self.prm['storedBlocks']*100 + 1 / self.prm['storedBlocks']*pcDone
# pcTot = (self.prm['currentRepetition'] - 1) / self.prm['allBlocks']['repetitions']*100 + 1 / self.prm['allBlocks']['repetitions']*pcThisRep
# self.gauge.setValue(pcTot)
#if self.runAnotherTrial == False:
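        # two trials are presented at each sequence length: continue with a longer
        # sequence if at least one of the two responses was correct, otherwise stop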
if self.prm['nTrialsSequence'] == 2:
if self.prm['correct'][len(self.prm['correct'])-1] == 1 or self.prm['correct'][len(self.prm['correct'])-2] == 1:
keepGoing = True
self.prm['adaptiveParam'] = self.prm['adaptiveParam']+1
self.prm['nTrialsSequence'] = 0
else:
keepGoing = False
else:
keepGoing = True
if keepGoing == False:
self.gauge.setValue(100)
self.writeResultsHeader('standard')
self.fullFileLog.write('\n')
self.fullFileLines.append('\n')
digitSpan = int(self.prm['adaptiveParam'] -1)
digitSpanScore = np.sum(np.array(self.prm['correct']))
for i in range(len(self.fullFileLines)):
self.fullFile.write(self.fullFileLines[i])
self.resFile.write("Longest Digit Span = " + str(digitSpan) + '\n')
self.resFile.write("Digit Span Score= " + str(digitSpanScore) + '\n')
self.resFile.write('\n\n')
self.resFile.flush()
self.resFileLog.write('\n\n')
self.resFileLog.flush()
self.getEndTime()
currBlock = 'b' + str(self.prm['currentBlock'])
durString = '{0:5.3f}'.format(self.prm['blockEndTime'] - self.prm['blockStartTime'])
resLineToWrite = str(digitSpan) + self.prm['pref']["general"]["csvSeparator"] + \
str(digitSpanScore) + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = self.getCommonTabFields(resLineToWrite)
resLineToWrite = resLineToWrite + '\n'
self.writeResultsSummaryLine('Digit Span', resLineToWrite)
resLineToWriteSummFull = ""
for i in range(len(self.fullFileSummLines)):
resLineToWriteSummFull = resLineToWriteSummFull + " ".join(self.fullFileSummLines[i]) + \
self.prm[currBlock]['conditionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['listener'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['sessionLabel'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['allBlocks']['experimentLabel'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm['blockEndDateString'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm['blockEndTimeString'] + self.prm['pref']["general"]["csvSeparator"] + \
durString + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['blockPosition'] + self.prm['pref']["general"]["csvSeparator"] + \
self.prm[currBlock]['experiment'] + self.prm['pref']["general"]["csvSeparator"] +\
self.prm[currBlock]['paradigm'] + self.prm['pref']["general"]["csvSeparator"]
resLineToWriteSummFull = self.getCommonTabFields(resLineToWriteSummFull)
resLineToWriteSummFull = resLineToWriteSummFull + '\n'
self.writeResultsSummaryFullLine('Digit Span', resLineToWriteSummFull)
self.atBlockEnd()
else:
self.doTrial()
def whenFinished(self):
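        """Handle the end of the last stored block: close the result files, optionally
        process, plot and e-mail the results, run any end-of-experiment custom commands,
        and either quit or start the next repetition of the stored blocks.
        """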
if self.prm['currentRepetition'] == self.prm['allBlocks']['repetitions']:
self.statusButton.setText(self.prm['rbTrans'].translate("rb", "Finished"))
self.gauge.setValue(100)
QApplication.processEvents()
self.fullFile.close()
self.resFile.close()
self.fullFileLog.close()
self.resFileLog.close()
self.prm['shuffled'] = False
if self.prm["allBlocks"]["procRes"] == True:
self.processResultsEnd()
if self.prm["allBlocks"]["procResTable"] == True:
self.processResultsTableEnd()
if self.prm["allBlocks"]["winPlot"] == True or self.prm["allBlocks"]["pdfPlot"] == True:
self.plotDataEnd(winPlot=self.prm["allBlocks"]["winPlot"], pdfPlot = self.prm["allBlocks"]["pdfPlot"])
if self.prm["pref"]["general"]["playEndMessage"] == True:
self.playEndMessage()
if self.prm['pref']['email']['sendData'] == True:
self.sendData()
commandsToExecute = []
cmd1 = self.parseCustomCommandArguments(self.prm['allBlocks']['endExpCommand'])
cmd2 = self.parseCustomCommandArguments(self.prm['pref']["general"]["atEndCustomCommand"])
if len(cmd1) > 0:
commandsToExecute.append(cmd1)
if len(cmd2) > 0:
commandsToExecute.append(cmd2)
if len(commandsToExecute) > 0:
self.executerThread.executeCommand(commandsToExecute)
if self.prm['quit'] == True:
self.parent().deleteLater()
else:
self.prm['currentRepetition'] = self.prm['currentRepetition'] + 1
self.parent().moveToBlockPosition(1)
if self.prm['allBlocks']['shuffleMode'] == self.tr('Auto'):
self.parent().onClickShuffleBlocksButton()
self.prm["shuffled"] = True
elif self.prm['allBlocks']['shuffleMode'] == self.tr('Ask') and self.prm['shuffled'] == True:
                #if the user shuffled on the first repetition, then shuffle on each repetition, otherwise don't shuffle
self.parent().onClickShuffleBlocksButton()
self.prm["shuffled"] = True
if self.prm['allBlocks']['responseMode'] in [self.tr("Automatic"), self.tr("Simulated Listener"), self.tr("Psychometric")]:
self.onClickStatusButton()
def atBlockEnd(self):
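        """Wrap up the block that has just finished: write the results footers, update
        the block progress gauge, optionally send an OFF trigger and notifications,
        then move to the next block or call whenFinished() if none are left.
        """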
self.writeResultsFooter('log'); self.writeResultsFooter('standard')
bp = int(self.prm['b'+str(self.prm["currentBlock"])]["blockPosition"])
cb = (self.prm['currentRepetition']-1)*self.prm["storedBlocks"]+bp
self.blockGauge.setValue(cb)
self.blockGauge.setFormat(self.prm['rbTrans'].translate('rb', "Blocks Completed") + ': ' + str(cb) + '/' + str(self.prm['storedBlocks']*self.prm['allBlocks']['repetitions']))
if self.prm['allBlocks']['sendTriggers'] == True:
thisSnd = pureTone(440, 0, -200, 80, 10, "Both", self.prm['allBlocks']['sampRate'], 100)
#playCmd = self.prm['pref']['sound']['playCommand']
time.sleep(1)
self.audioManager.playSoundWithTrigger(thisSnd, self.prm['allBlocks']['sampRate'], self.prm['allBlocks']['nBits'], False, 'OFFTrigger.wav', self.prm["pref"]["general"]["OFFTrigger"])
print("SENDING END TRIGGER", self.prm["pref"]["general"]["OFFTrigger"])
if self.prm['currentRepetition'] == self.prm['allBlocks']['repetitions'] and int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition']) + self.prm['pref']['email']['nBlocksNotify'] == self.prm['storedBlocks']:
cmd = self.parseCustomCommandArguments(self.prm['pref']["general"]["nBlocksCustomCommand"])
if len(cmd) > 0:
self.executerThread.executeCommand([cmd])
if self.prm['pref']['email']['notifyEnd'] == True:
self.sendEndNotification()
if int(self.prm['b'+str(self.prm['currentBlock'])]['blockPosition']) < self.prm['storedBlocks']:
self.parent().onClickNextBlockPositionButton()
if self.prm['allBlocks']['responseMode'] == self.tr("Automatic") or self.prm['allBlocks']['responseMode'] == self.tr("Simulated Listener") or self.prm['allBlocks']['responseMode'] == self.tr("Psychometric"):
self.onClickStatusButton()
else:
return
else:
self.whenFinished()
self.prm['cmdOutFileHandle'].flush()
def getEndTime(self):
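        """Record the block end time, both as a timestamp and as locale-formatted date/time strings."""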
self.prm['blockEndTime'] = time.time()
self.prm['blockEndTimeStamp'] = QDateTime.toString(QDateTime.currentDateTime(), self.currLocale.dateTimeFormat(self.currLocale.ShortFormat))
self.prm['blockEndDateString'] = QDate.toString(QDate.currentDate(), self.currLocale.dateFormat(self.currLocale.ShortFormat))
self.prm['blockEndTimeString'] = QTime.toString(QTime.currentTime(), self.currLocale.timeFormat(self.currLocale.ShortFormat))
def getStartTime(self):
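        """Record the block start time, both as a timestamp and as locale-formatted date/time strings."""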
self.prm['blockStartTime'] = time.time()
self.prm['blockStartTimeStamp'] = QDateTime.toString(QDateTime.currentDateTime(), self.currLocale.dateTimeFormat(self.currLocale.ShortFormat))
self.prm['blockStartDateString'] = QDate.toString(QDate.currentDate(), self.currLocale.dateFormat(self.currLocale.ShortFormat))
self.prm['blockStartTimeString'] = QTime.toString(QTime.currentTime(), self.currLocale.timeFormat(self.currLocale.ShortFormat))
def writeResultsHeader(self, fileType):
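        """Open the 'log' (backup) or 'standard' results files in append mode and write
        the block header (program version, block, listener, paradigm and stimulus parameters).
        """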
if fileType == 'log':
resLogFilePath = self.prm['backupDirectoryName'] + time.strftime("%y-%m-%d_%H-%M-%S", time.localtime()) + '_' + self.prm['listener'] + '_log'
            resLogFullFilePath = self.prm['backupDirectoryName'] + time.strftime("%y-%m-%d_%H-%M-%S", time.localtime()) + '_' + self.prm['listener'] + '_full_log'
self.resFileLog = open(resLogFilePath, 'a')
self.fullFileLog = open(resLogFullFilePath, 'a')
filesToWrite = [self.resFileLog, self.fullFileLog]
elif fileType == 'standard':
resFilePath = self.prm['resultsFile']
fullFilePath = self.prm['resultsFile'].split('.txt')[0] + self.prm['pref']["general"]["fullFileSuffix"] + '.txt'
self.resFile = open(resFilePath, 'a')
self.fullFile = open(fullFilePath, 'a')
filesToWrite = [self.resFile, self.fullFile]
currBlock = 'b' + str(self.prm['currentBlock'])
for i in range(2):
thisFile = filesToWrite[i]
thisFile.write('*******************************************************\n')
thisFile.write('pychoacoustics version: ' + self.prm['version'] + '; build date: ' + self.prm['builddate'] + '\n')
if 'version' in self.prm[self.parent().currExp]:
thisFile.write('Experiment version: ' + self.prm[self.parent().currExp]['version'] + '\n')
thisFile.write('Block Number: ' + str(self.prm['currentBlock']) + '\n')
thisFile.write('Block Position: ' + self.prm['b'+str(self.prm['currentBlock'])]['blockPosition'] + '\n')
thisFile.write('Start: ' + self.prm['blockStartTimeStamp']+ '\n')
thisFile.write('+++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
thisFile.write('Experiment Label: ' + self.prm['allBlocks']['experimentLabel'] + '\n')
thisFile.write('Session Label: ' + self.prm['sessionLabel'] + '\n')
thisFile.write('Condition Label: ' + self.prm[currBlock]['conditionLabel'] + '\n')
thisFile.write('Experiment: ' + self.prm[currBlock]['experiment'] + '\n')
thisFile.write('Listener: ' + self.prm['listener'] + '\n')
thisFile.write('Response Mode: ' + self.prm['allBlocks']['responseMode'] + '\n')
if self.prm['allBlocks']['responseMode'] == self.tr("Automatic"):
thisFile.write('Auto Resp. Mode Perc. Corr.: ' + str(self.prm['allBlocks']['autoPCCorr']) + '\n')
elif self.prm['allBlocks']['responseMode'] == self.tr("Psychometric"):
thisFile.write('Psychometric Listener Function: ' + str(self.prm[currBlock]['psyListFun']) + '\n')
thisFile.write('Psychometric Listener Function Fit: ' + str(self.prm[currBlock]['psyListFunFit']) + '\n')
thisFile.write('Psychometric Listener Midpoint: ' + str(self.prm[currBlock]['psyListMidpoint']) + '\n')
thisFile.write('Psychometric Listener Slope: ' + str(self.prm[currBlock]['psyListSlope']) + '\n')
thisFile.write('Psychometric Listener Lapse: ' + str(self.prm[currBlock]['psyListLapse']) + '\n')
thisFile.write('Paradigm: ' + self.prm['paradigm'] +'\n')
thisFile.write('Intervals: ' + self.currLocale.toString(self.prm['nIntervals']) + '\n')
thisFile.write('Alternatives: ' + self.currLocale.toString(self.prm['nAlternatives']) + '\n')
for j in range(len(self.prm[currBlock]['paradigmChooser'])):
thisFile.write(self.prm[currBlock]['paradigmChooserLabel'][j] + ' ' + self.prm[currBlock]['paradigmChooser'][j] + '\n')
for j in range(len(self.prm[currBlock]['paradigmField'])):
thisFile.write(self.prm[currBlock]['paradigmFieldLabel'][j] + ': ' + self.currLocale.toString(self.prm[currBlock]['paradigmField'][j], precision=self.prm["pref"]["general"]["precision"]) + '\n')
thisFile.write('Phones: ' + self.prm['allBlocks']['currentPhones'] + '\n')
thisFile.write('Sample Rate: ' + self.currLocale.toString(self.prm['allBlocks']['sampRate']) + '\n')
thisFile.write('Bits: ' + self.currLocale.toString(self.prm['allBlocks']['nBits']) + '\n')
thisFile.write('Pre-Trial Silence (ms): ' + self.currLocale.toString(self.prm[currBlock]['preTrialSilence']) + '\n')
thisFile.write('Warning Interval: ' + str(self.prm[currBlock]['warningInterval']) + '\n')
thisFile.write('Interval Lights: ' + self.prm[currBlock]['intervalLights'] + '\n')
if self.prm[currBlock]['warningInterval'] == self.tr("Yes"):
thisFile.write('Warning Interval Duration (ms): ' + self.currLocale.toString(self.prm[currBlock]['warningIntervalDur']) + '\n')
thisFile.write('Warning Interval ISI (ms): ' + self.currLocale.toString(self.prm[currBlock]['warningIntervalISI']) + '\n')
thisFile.write('Response Light: ' + self.prm['responseLight'] + '\n')
thisFile.write('Response Light Type: ' + self.prm['responseLightType'] + '\n')
thisFile.write('Response Light Duration (ms): ' + self.currLocale.toString(self.prm[currBlock]['responseLightDuration']) + '\n')
if self.prm[self.parent().currExp]["hasISIBox"] == True:
thisFile.write('ISI: ' + self.currLocale.toString(self.prm['isi']) + '\n')
if self.prm[self.parent().currExp]["hasPreTrialInterval"] == True:
thisFile.write('Pre-Trial Interval: ' + self.prm[currBlock]['preTrialInterval'] + '\n')
if self.prm[currBlock]['preTrialInterval'] == self.tr("Yes"):
thisFile.write('Pre-Trial Interval ISI: ' + self.currLocale.toString(self.prm[currBlock]['preTrialIntervalISI']) + '\n')
if self.prm[self.parent().currExp]["hasPrecursorInterval"] == True:
thisFile.write('Precursor Interval: ' + self.prm[currBlock]['precursorInterval'] + '\n')
if self.prm[currBlock]['precursorInterval'] == self.tr("Yes"):
thisFile.write('Precursor Interval ISI: ' + self.currLocale.toString(self.prm[currBlock]['precursorIntervalISI']) + '\n')
if self.prm[self.parent().currExp]["hasPostcursorInterval"] == True:
thisFile.write('Postcursor Interval: ' + self.prm[currBlock]['postcursorInterval'] + '\n')
if self.prm[currBlock]['postcursorInterval'] == self.tr("Yes"):
thisFile.write('Postcursor Interval ISI: ' + self.currLocale.toString(self.prm[currBlock]['postcursorIntervalISI']) + '\n')
if self.prm[self.parent().currExp]["hasAltReps"] == True:
thisFile.write('Alternated (AB) Reps.: ' + self.currLocale.toString(self.prm['altReps']) + '\n')
thisFile.write('Alternated (AB) Reps. ISI (ms): ' + self.currLocale.toString(self.prm['altRepsISI']) + '\n')
thisFile.write('\n')
for j in range(len(self.prm[currBlock]['chooser'])):
if j not in self.parent().choosersToHide:
thisFile.write(self.parent().chooserLabel[j].text() + ' ' + self.prm[currBlock]['chooser'][j] + '\n')
for j in range(len(self.prm[currBlock]['fileChooser'])):
if j not in self.parent().fileChoosersToHide:
thisFile.write(self.parent().fileChooserButton[j].text() + ' ' + self.prm[currBlock]['fileChooser'][j] + '\n')
for j in range(len(self.prm[currBlock]['dirChooser'])):
if j not in self.parent().dirChoosersToHide:
thisFile.write(self.parent().dirChooserButton[j].text() + ' ' + self.prm[currBlock]['dirChooser'][j] + '\n')
for j in range(len(self.prm[currBlock]['field'])):
                if j not in self.parent().fieldsToHide and self.parent().fieldLabel[j].text() != "Random Seed":
thisFile.write(self.parent().fieldLabel[j].text() + ': ' + self.currLocale.toString(self.prm[currBlock]['field'][j]) + '\n')
thisFile.write('+++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
thisFile.flush()
def writeResultsFooter(self, fileType):
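        """Append the block end timestamp and the block duration (in minutes) to the results files."""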
if fileType == 'log':
filesToWrite = [self.resFileLog, self.fullFileLog]
elif fileType == 'standard':
filesToWrite = [self.resFile, self.fullFile]
for i in range(2):
thisFile = filesToWrite[i]
#thisFile.write('*******************************************************\n\n')
thisFile.write('End: ' + self.prm['blockEndTimeStamp'] + '\n')
thisFile.write('Duration: {} min. \n'.format( (self.prm['blockEndTime'] - self.prm['blockStartTime']) / 60 ))
thisFile.write('\n')
thisFile.flush()
def writeResultsSummaryLine(self, paradigm, resultsLine):
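        """Append a one-line summary of the block results to the tabular (CSV) results file.

        The paradigm-specific header is written first if an identical header is not
        already present in the file; otherwise the line is inserted under the existing header.
        """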
if paradigm in ['Transformed Up-Down', 'Transformed Up-Down Limited', 'Weighted Up-Down', 'Weighted Up-Down Limited']:
headerToWrite = 'threshold_' + self.prm['adaptiveType'].lower() + self.prm['pref']["general"]["csvSeparator"] + \
'SD' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Transformed Up-Down Hybrid', 'Weighted Up-Down Hybrid']:
headerToWrite = 'threshold_' + self.prm['adaptiveType'].lower() + self.prm['pref']["general"]["csvSeparator"] + \
'SD' + self.prm['pref']["general"]["csvSeparator"] + \
'nCorrAtMaxLev' + self.prm['pref']["general"]["csvSeparator"] + \
'nTotAtMaxLev' + self.prm['pref']["general"]["csvSeparator"] + \
'percCorrAtMaxLev' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Transformed Up-Down Interleaved', 'Weighted Up-Down Interleaved']:
headerToWrite = ''
for j in range(self.prm['nDifferences']):
headerToWrite = headerToWrite + 'threshold_' + self.prm['adaptiveType'].lower() + '_track' + str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'SD_track'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Constant 1-Interval 2-Alternatives']:
headerToWrite = 'dprime' + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal'+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrectA'+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotalA'+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrectB'+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotalB'+ self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] +\
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Constant 1-Pair Same/Different']:
headerToWrite = 'dprime_IO' + self.prm['pref']["general"]["csvSeparator"] + \
'dprime_diff' + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal'+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrect_same'+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_same'+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrect_different'+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_different'+ self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Multiple Constants 1-Pair Same/Different']:
headerToWrite = ''
for j in range(self.prm['nDifferences']):
headerToWrite = headerToWrite + 'dprime_IO_pair' + str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'dprime_diff_pair' + str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nCorrect_same_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_same_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nCorrect_different_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_different_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Multiple Constants ABX']:
headerToWrite = ''
for j in range(self.prm['nDifferences']):
headerToWrite = headerToWrite + 'dprime_IO_pair' + str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'dprime_diff_pair' + str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nCorrect_A_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_A_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nCorrect_B_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_B_pair'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Multiple Constants 1-Interval 2-Alternatives']:
headerToWrite = 'dprime_ALL' + self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_ALL'+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrectA_ALL'+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotalA_ALL'+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrectB_ALL'+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotalB_ALL'+ self.prm['pref']["general"]["csvSeparator"]
for j in range(len(self.prm['conditions'])):
headerToWrite = headerToWrite + 'dprime_subc' + str(j+1)+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotal_subc'+ str(j+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nCorrectA_subc'+ str(j+1)+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotalA_subc'+ str(j+1)+ self.prm['pref']["general"]["csvSeparator"] + \
'nCorrectB_subc'+ str(j+1)+ self.prm['pref']["general"]["csvSeparator"] + \
'nTotalB_subc'+ str(j+1)+ self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Constant m-Intervals n-Alternatives']:
headerToWrite = ''
headerToWrite = headerToWrite + 'dprime' + self.prm['pref']["general"]["csvSeparator"] + \
'perc_corr' + self.prm['pref']["general"]["csvSeparator"] + \
'n_corr'+ self.prm['pref']["general"]["csvSeparator"] +\
'n_trials' + self.prm['pref']["general"]["csvSeparator"]+\
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration' + self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] +\
'paradigm' + self.prm['pref']["general"]["csvSeparator"] +\
'nIntervals' + self.prm['pref']["general"]["csvSeparator"] + \
'nAlternatives' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Multiple Constants m-Intervals n-Alternatives']:
headerToWrite = ''
for i in range(len(self.prm['conditions'])):
headerToWrite = headerToWrite + 'dprime_subc' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'perc_corr_subc' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'n_corr_subc'+ str(i+1) + self.prm['pref']["general"]["csvSeparator"] +\
'n_trials_subc'+ str(i+1) + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'tot_dprime' + self.prm['pref']["general"]["csvSeparator"] + \
'tot_perc_corr' + self.prm['pref']["general"]["csvSeparator"] + \
'tot_n_corr' + self.prm['pref']["general"]["csvSeparator"] + \
'tot_n_trials' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] +\
'paradigm' + self.prm['pref']["general"]["csvSeparator"] +\
'nIntervals' + self.prm['pref']["general"]["csvSeparator"] + \
'nAlternatives' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['PEST']:
headerToWrite = 'threshold_' + self.prm['adaptiveType'].lower() + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Multiple Constants Odd One Out']:
headerToWrite = ""#'nTrials' + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
headerToWrite = headerToWrite + 'nCorr_subcnd'+str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'nTrials_subcnd'+str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'percCorr_subcnd'+str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'dprime_IO_subcnd'+str(i+1) + self.prm['pref']["general"]["csvSeparator"] +\
'dprime_diff_subcnd'+str(i+1) + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Multiple Constants Sound Comparison']:
headerToWrite = 'nTrials' + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm['conditions'])):
headerToWrite = headerToWrite + 'stim1_count_subcnd' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'stim1_percent_subcnd' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'stim2_count_subcnd' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'stim2_percent_subcnd' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'stim3_count_subcnd' + str(i+1) + self.prm['pref']["general"]["csvSeparator"] + \
'stim3_percent_subcnd' + str(i+1) + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Maximum Likelihood']:
headerToWrite = 'threshold' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['PSI', 'UML']:
headerToWrite = 'threshold' + self.prm['pref']["general"]["csvSeparator"] + \
'slope' + self.prm['pref']["general"]["csvSeparator"] + \
'lapse' + self.prm['pref']["general"]["csvSeparator"] + \
'nTrials' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['PSI - Est. Guess Rate', 'UML - Est. Guess Rate']:
headerToWrite = 'threshold' + self.prm['pref']["general"]["csvSeparator"] + \
'guess' + self.prm['pref']["general"]["csvSeparator"] + \
'slope' + self.prm['pref']["general"]["csvSeparator"] + \
'lapse' + self.prm['pref']["general"]["csvSeparator"] + \
'nTrials' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
elif paradigm in ['Digit Span']:
headerToWrite = 'longest_span' + self.prm['pref']["general"]["csvSeparator"] + \
'span_score' + self.prm['pref']["general"]["csvSeparator"] + \
'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
currBlock = 'b'+str(self.prm['currentBlock'])
for i in range(len(self.prm[currBlock]['fieldCheckBox'])):
if self.prm[currBlock]['fieldCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['fieldLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['chooserCheckBox'])):
if self.prm[currBlock]['chooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['chooserLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['fileChooserCheckBox'])):
if self.prm[currBlock]['fileChooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['fileChooserButton'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['dirChooserCheckBox'])):
if self.prm[currBlock]['dirChooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['dirChooserButton'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['paradigmFieldCheckBox'])):
if self.prm[currBlock]['paradigmFieldCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['paradigmFieldLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['paradigmChooserCheckBox'])):
if self.prm[currBlock]['paradigmChooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['paradigmChooserLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
if self.prm[self.parent().currExp]["hasISIBox"] == True:
if self.prm[currBlock]['ISIValCheckBox'] == True:
headerToWrite = headerToWrite + 'ISI (ms)' + self.prm['pref']["general"]["csvSeparator"]
if paradigm not in ['Constant m-Intervals n-Alternatives', 'Multiple Constants m-Intervals n-Alternatives']:
if self.prm[self.parent().currExp]["hasAlternativesChooser"] == True:
if self.prm[currBlock]['nIntervalsCheckBox'] == True:
headerToWrite = headerToWrite + 'Intervals' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['nAlternativesCheckBox'] == True:
headerToWrite = headerToWrite + 'Alternatives' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[self.parent().currExp]["hasAltReps"] == True:
if self.prm[currBlock]['altRepsCheckBox'] == True:
headerToWrite = headerToWrite + 'Alternated (AB) Reps.' + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'Alternated (AB) Reps. ISI (ms)' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightCheckBox'] == True:
headerToWrite = headerToWrite + 'Response Light' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightTypeCheckBox'] == True:
headerToWrite = headerToWrite + 'Response Light Type' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightDurationCheckBox'] == True:
headerToWrite = headerToWrite + 'Response Light Duration' + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + '\n'
if os.path.exists(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+'.csv') == False: #case 1 file does not exist yet
self.resFileSummary = open(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+'.csv', 'w')
self.resFileSummary.write(headerToWrite)
self.resFileSummary.write(resultsLine)
self.resFileSummary.close()
else:
self.resFileSummary = open(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+'.csv', 'r')
allLines = self.resFileSummary.readlines()
self.resFileSummary.close()
try:
h1idx = allLines.index(headerToWrite)
headerPresent = True
            except ValueError:
headerPresent = False
if headerPresent == True:
                # an identical header is already present in the file
nextHeaderFound = False
for i in range(h1idx+1, len(allLines)):
                    # look for the next header or the end of the file
if allLines[i][0:6] == 'dprime' or allLines[i][0:4] == 'perc' or allLines[i][0:9] == 'threshold':
nextHeaderFound = True
h2idx = i
break
if nextHeaderFound == True:
                    # next header found: insert the results line just before it
allLines.insert(h2idx, resultsLine)
else:
allLines.append(resultsLine)
                    # no further header found: the results line is appended at the end
elif headerPresent == False:
allLines.append(headerToWrite)
allLines.append(resultsLine)
self.resFileSummary = open(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+'.csv', 'w')
self.resFileSummary.writelines(allLines)
self.resFileSummary.close()
def writeResultsSummaryFullLine(self, paradigm, resultsLine):
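        """Append the trial-by-trial results of the block to the 'full' tabular (CSV) results file.

        The paradigm-specific header is written first if an identical header is not
        already present in the file; otherwise the lines are inserted under the existing header.
        """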
if paradigm in ['Transformed Up-Down', 'Transformed Up-Down Limited', 'Weighted Up-Down', 'Weighted Up-Down Limited', 'PEST', 'Transformed Up-Down Hybrid', 'Weighted Up-Down Hybrid']:
headerToWrite = 'adaptive_difference' + self.prm['pref']["general"]["csvSeparator"] + \
'response' + self.prm['pref']["general"]["csvSeparator"]
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write_labels'])):
headerToWrite = headerToWrite + self.prm['additional_parameters_to_write_labels'][p] + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
if paradigm in ['PSI', 'UML', "PSI - Est. Guess Rate", "UML - Est. Guess Rate"]:
headerToWrite = 'adaptive_difference' + self.prm['pref']["general"]["csvSeparator"] + \
'response' + self.prm['pref']["general"]["csvSeparator"]
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write_labels'])):
headerToWrite = headerToWrite + self.prm['additional_parameters_to_write_labels'][p] + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
if paradigm in ['Multiple Constants 1-Pair Same/Different']:
headerToWrite = 'pair' + self.prm['pref']["general"]["csvSeparator"] + \
'stim1' + self.prm['pref']["general"]["csvSeparator"] + \
'stim2' + self.prm['pref']["general"]["csvSeparator"] + \
'case' + self.prm['pref']["general"]["csvSeparator"] + \
'response' + self.prm['pref']["general"]["csvSeparator"]
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write_labels'])):
headerToWrite = headerToWrite + self.prm['additional_parameters_to_write_labels'][p] + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
if paradigm in ['Multiple Constants ABX']:
headerToWrite = 'pair' + self.prm['pref']["general"]["csvSeparator"] + \
'A' + self.prm['pref']["general"]["csvSeparator"] + \
'B' + self.prm['pref']["general"]["csvSeparator"] + \
'X' + self.prm['pref']["general"]["csvSeparator"] + \
'case' + self.prm['pref']["general"]["csvSeparator"] + \
'response' + self.prm['pref']["general"]["csvSeparator"]
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write_labels'])):
headerToWrite = headerToWrite + self.prm['additional_parameters_to_write_labels'][p] + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
if paradigm in ['Multiple Constants Odd One Out']:
headerToWrite = 'subcondition' + self.prm['pref']["general"]["csvSeparator"] + \
'response' + self.prm['pref']["general"]["csvSeparator"]
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write_labels'])):
headerToWrite = headerToWrite + self.prm['additional_parameters_to_write_labels'][p] + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
if paradigm in ['Digit Span']:
headerToWrite = 'sequence_length' + self.prm['pref']["general"]["csvSeparator"] + \
'response' + self.prm['pref']["general"]["csvSeparator"]
if 'additional_parameters_to_write' in self.prm:
for p in range(len(self.prm['additional_parameters_to_write_labels'])):
headerToWrite = headerToWrite + self.prm['additional_parameters_to_write_labels'][p] + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + "response_sequence" + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'condition' + self.prm['pref']["general"]["csvSeparator"] + \
'listener' + self.prm['pref']["general"]["csvSeparator"] + \
'session'+ self.prm['pref']["general"]["csvSeparator"] + \
'experimentLabel'+ self.prm['pref']["general"]["csvSeparator"] + \
'date'+ self.prm['pref']["general"]["csvSeparator"] + \
'time'+ self.prm['pref']["general"]["csvSeparator"] + \
'duration'+ self.prm['pref']["general"]["csvSeparator"] + \
'block'+ self.prm['pref']["general"]["csvSeparator"] + \
'experiment' + self.prm['pref']["general"]["csvSeparator"] + \
'paradigm' + self.prm['pref']["general"]["csvSeparator"]
currBlock = 'b'+str(self.prm['currentBlock'])
for i in range(len(self.prm[currBlock]['fieldCheckBox'])):
if self.prm[currBlock]['fieldCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['fieldLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['chooserCheckBox'])):
if self.prm[currBlock]['chooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['chooserLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['fileChooserCheckBox'])):
if self.prm[currBlock]['fileChooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['fileChooserButton'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['dirChooserCheckBox'])):
if self.prm[currBlock]['dirChooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['dirChooserButton'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['paradigmFieldCheckBox'])):
if self.prm[currBlock]['paradigmFieldCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['paradigmFieldLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['paradigmChooserCheckBox'])):
if self.prm[currBlock]['paradigmChooserCheckBox'][i] == True:
headerToWrite = headerToWrite + self.prm[currBlock]['paradigmChooserLabel'][i] + self.prm['pref']["general"]["csvSeparator"]
if self.prm[self.parent().currExp]["hasISIBox"] == True:
if self.prm[currBlock]['ISIValCheckBox'] == True:
headerToWrite = headerToWrite + 'ISI (ms)' + self.prm['pref']["general"]["csvSeparator"]
if paradigm not in ['Constant m-Intervals n-Alternatives', 'Multiple Constants m-Intervals n-Alternatives']:
if self.prm[self.parent().currExp]["hasAlternativesChooser"] == True:
if self.prm[currBlock]['nIntervalsCheckBox'] == True:
headerToWrite = headerToWrite + 'Intervals' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['nAlternativesCheckBox'] == True:
headerToWrite = headerToWrite + 'Alternatives' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[self.parent().currExp]["hasAltReps"] == True:
if self.prm[currBlock]['altRepsCheckBox'] == True:
headerToWrite = headerToWrite + 'Alternated (AB) Reps.' + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + 'Alternated (AB) Reps. ISI (ms)' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightCheckBox'] == True:
headerToWrite = headerToWrite + 'Response Light' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightTypeCheckBox'] == True:
headerToWrite = headerToWrite + 'Response Light Type' + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightDurationCheckBox'] == True:
headerToWrite = headerToWrite + 'Response Light Duration' + self.prm['pref']["general"]["csvSeparator"]
headerToWrite = headerToWrite + '\n'
if os.path.exists(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+self.prm["pref"]["general"]["fullFileSuffix"]+'.csv') == False: #case 1 file does not exist yet
self.resFileSummaryFull = open(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+self.prm["pref"]["general"]["fullFileSuffix"]+'.csv', 'w')
self.resFileSummaryFull.write(headerToWrite)
self.resFileSummaryFull.write(resultsLine)
self.resFileSummaryFull.close()
else:
self.resFileSummaryFull = open(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+self.prm["pref"]["general"]["fullFileSuffix"]+'.csv', 'r')
allLines = self.resFileSummaryFull.readlines()
self.resFileSummaryFull.close()
try:
h1idx = allLines.index(headerToWrite)
headerPresent = True
            except ValueError:
headerPresent = False
if headerPresent == True:
                # an identical header is already present in the file
nextHeaderFound = False
for i in range(h1idx+1, len(allLines)):
                    # look for the next header or the end of the file
if allLines[i][0:8] == 'adaptive' or allLines[i][0:4] == 'perc' or allLines[i][0:9] == 'threshold':
nextHeaderFound = True
h2idx = i
break
if nextHeaderFound == True:
                    # next header found: insert the results line just before it
allLines.insert(h2idx, resultsLine)
else:
allLines.append(resultsLine)
                    # no further header found: the results line is appended at the end
elif headerPresent == False:
allLines.append(headerToWrite)
allLines.append(resultsLine)
self.resFileSummaryFull = open(self.prm['resultsFile'].split('.txt')[0]+ self.prm['pref']["general"]["resTableFileSuffix"]+self.prm["pref"]["general"]["fullFileSuffix"]+'.csv', 'w')
self.resFileSummaryFull.writelines(allLines)
self.resFileSummaryFull.close()
def getCommonTabFields(self, resLineToWrite):
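        """Append to resLineToWrite the values of the fields, choosers and other widgets
        that the user has checked for inclusion in the tabular results, and return it.
        """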
currBlock = 'b' + str(self.prm['currentBlock'])
for i in range(len(self.prm[currBlock]['fieldCheckBox'])):
if self.prm[currBlock]['fieldCheckBox'][i] == True:
resLineToWrite = resLineToWrite + self.currLocale.toString(self.prm[currBlock]['field'][i], precision=self.prm["pref"]["general"]["precision"]) + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['chooserCheckBox'])):
if self.prm[currBlock]['chooserCheckBox'][i] == True:
resLineToWrite = resLineToWrite + self.prm[currBlock]['chooser'][i].split(':')[0] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['fileChooserCheckBox'])):
if self.prm[currBlock]['fileChooserCheckBox'][i] == True:
resLineToWrite = resLineToWrite + self.prm[currBlock]['fileChooser'][i].split(':')[0] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['dirChooserCheckBox'])):
if self.prm[currBlock]['dirChooserCheckBox'][i] == True:
resLineToWrite = resLineToWrite + self.prm[currBlock]['dirChooser'][i].split(':')[0] + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['paradigmFieldCheckBox'])):
if self.prm[currBlock]['paradigmFieldCheckBox'][i] == True:
resLineToWrite = resLineToWrite + self.currLocale.toString(self.prm[currBlock]['paradigmField'][i],
precision=self.prm["pref"]["general"]["precision"]) + self.prm['pref']["general"]["csvSeparator"]
for i in range(len(self.prm[currBlock]['paradigmChooserCheckBox'])):
if self.prm[currBlock]['paradigmChooserCheckBox'][i] == True:
resLineToWrite = resLineToWrite + self.prm[currBlock]['paradigmChooser'][i].split(':')[0] + self.prm['pref']["general"]["csvSeparator"]
if self.prm[self.parent().currExp]["hasISIBox"] == True:
if self.prm[currBlock]['ISIValCheckBox'] == True:
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['ISIVal']) + self.prm['pref']["general"]["csvSeparator"]
if self.prm['paradigm'] not in ['Constant m-Intervals n-Alternatives', 'Multiple Constants m-Intervals n-Alternatives']:
if self.prm[self.parent().currExp]["hasAlternativesChooser"] == True:
if self.prm[currBlock]['nIntervalsCheckBox'] == True:
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['nIntervals']) + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['nAlternativesCheckBox'] == True:
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['nAlternatives']) + self.prm['pref']["general"]["csvSeparator"]
if self.prm[self.parent().currExp]["hasAltReps"] == True:
if self.prm[currBlock]['altRepsCheckBox'] == True:
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['altReps']) + self.prm['pref']["general"]["csvSeparator"]
resLineToWrite = resLineToWrite + str(self.prm[currBlock]['altRepsISI']) + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightCheckBox'] == True:
resLineToWrite = resLineToWrite + self.prm[currBlock]['responseLight'] + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightTypeCheckBox'] == True:
resLineToWrite = resLineToWrite + self.prm[currBlock]['responseLightType'] + self.prm['pref']["general"]["csvSeparator"]
if self.prm[currBlock]['responseLightDurationCheckBox'] == True:
resLineToWrite = resLineToWrite + self.currLocale.toString(self.prm[currBlock]['responseLightDuration']) + self.prm['pref']["general"]["csvSeparator"]
return resLineToWrite
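# Illustrative fragment of the kind of string getCommonTabFields appends
# (separator and values hypothetical): "12.5; 1000; Right; 2; 2; Light; 980; "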
def sendEndNotification(self):
currBlock = 'b'+ str(self.prm['currentBlock'])
subject = self.tr("Pychoacoustics Notification: Listener ") + self.prm['listener'] + self.tr(" has ") \
+ str(self.prm['pref']['email']['nBlocksNotify']) + self.tr(" block(s) to go")
body = subject + "\n" + self.tr("Experiment: ") + self.parent().currExp + \
'\n' + self.tr("Completed Blocks: ") + str(self.prm['currentBlock']) + self.tr(" Stored Blocks: ") + str(self.prm['storedBlocks'])
self.emailThread.sendEmail(subject, body)
def sendData(self):
currBlock = 'b'+ str(self.prm['currentBlock'])
subject = 'Pychoacoustics Data, Listener: ' + self.prm['listener'] + ', Experiment: ' + \
self.parent().currExp
body = ''
filesToSend = [self.pychovariablesSubstitute[self.pychovariables.index("[resFile]")],
self.pychovariablesSubstitute[self.pychovariables.index("[resFileTrial]")],
self.pychovariablesSubstitute[self.pychovariables.index("[resTable]")]] #self.prm['resultsFile'], self.prm['resultsFile'].split('.txt')[0]+self.prm['pref']["general"]["fullFileSuffix"]+'.txt']
if self.prm["allBlocks"]["procRes"] == True:
filesToSend.append(self.pychovariablesSubstitute[self.pychovariables.index("[resFileSess]")])#self.prm['resultsFile'].split('.txt')[0] + self.prm['pref']["general"]["resFileSuffix"]+'.txt')
if self.prm["allBlocks"]["procResTable"] == True:
filesToSend.append(self.pychovariablesSubstitute[self.pychovariables.index("[resTableSess]")])
if self.prm["allBlocks"]["pdfPlot"] == True:
filesToSend.append(self.pychovariablesSubstitute[self.pychovariables.index("[pdfPlot]")])
filesToSendChecked = []
for fName in filesToSend:
if os.path.exists(fName):
filesToSendChecked.append(fName)
else:
print('Could not find: ', fName)
self.emailThread.sendEmail(subject, body, filesToSendChecked)
def processResultsEnd(self):
resFilePath = self.prm['resultsFile']
if self.prm['paradigm'] in [self.tr("Transformed Up-Down"),
self.tr("Weighted Up-Down"),
self.tr("Transformed Up-Down Limited"),
self.tr("Weighted Up-Down Limited"),
self.tr("PEST")]:
processResultsAdaptive([resFilePath])
elif self.prm['paradigm'] in [self.tr("Transformed Up-Down Interleaved"),
self.tr("Weighted Up-Down Interleaved")]:
processResultsAdaptiveInterleaved([resFilePath])
elif self.prm['paradigm'] in [self.tr("Constant 1-Interval 2-Alternatives")]:
processResultsConstant1Interval2Alternatives([resFilePath], dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Multiple Constants 1-Interval 2-Alternatives")]:
processResultsMultipleConstants1Interval2Alternatives([resFilePath], dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Constant m-Intervals n-Alternatives")]:
processResultsConstantMIntervalsNAlternatives([resFilePath])
elif self.prm['paradigm'] in [self.tr("Multiple Constants m-Intervals n-Alternatives")]:
processResultsMultipleConstantsMIntervalsNAlternatives([resFilePath])
elif self.prm['paradigm'] in [self.tr("Constant 1-Pair Same/Different")]:
processResultsConstant1PairSameDifferent([resFilePath], dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
def processResultsTableEnd(self):
separator = self.parent().prm['pref']["general"]["csvSeparator"]
resFilePath = self.pychovariablesSubstitute[self.pychovariables.index("[resTable]")]
if self.prm['paradigm'] in [self.tr("Transformed Up-Down"),
self.tr("Weighted Up-Down"),
self.tr("Transformed Up-Down Limited"),
self.tr("Weighted Up-Down Limited"),
self.tr("PEST")]:
procResTableAdaptive([resFilePath], fout=None, separator=separator)
elif self.prm['paradigm'] in [self.tr("Transformed Up-Down Interleaved"),
self.tr("Weighted Up-Down Interleaved")]:
procResTableAdaptiveInterleaved([resFilePath], fout=None, separator=separator)
elif self.prm['paradigm'] in [self.tr("Constant 1-Interval 2-Alternatives")]:
procResTableConstant1Int2Alt([resFilePath], fout=None, separator=separator, dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Multiple Constants 1-Interval 2-Alternatives")]:
procResTableMultipleConstants1Int2Alt([resFilePath], fout=None, separator=separator, dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Constant m-Intervals n-Alternatives")]:
procResTableConstantMIntNAlt([resFilePath], fout=None, separator=separator)
elif self.prm['paradigm'] in [self.tr("Multiple Constants m-Intervals n-Alternatives")]:
procResTableMultipleConstantsMIntNAlt([resFilePath], fout=None, separator=separator)
elif self.prm['paradigm'] in [self.tr("Constant 1-Pair Same/Different")]:
procResTableConstant1PairSameDifferent([resFilePath], fout=None, separator=separator, dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Multiple Constants 1-Pair Same/Different")]:
procResTableMultipleConstants1PairSameDifferent([resFilePath], fout=None, separator=separator, dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Multiple Constants ABX")]:
procResTableMultipleConstantsABX([resFilePath], fout=None, separator=separator, dprimeCorrection=self.prm['pref']['general']['dprimeCorrection'])
elif self.prm['paradigm'] in [self.tr("Multiple Constants Odd One Out")]:
procResTableMultipleConstantsOddOneOut([resFilePath], fout=None, separator=separator)
def plotDataEnd(self, winPlot, pdfPlot):
if self.prm['appData']['plotting_available']:
resFilePath = self.pychovariablesSubstitute[self.pychovariables.index("[resTable]")]
summaryResFilePath = resFilePath.split('.csv')[0] + self.prm["pref"]["general"]["sessSummResFileSuffix"] + '.csv'
separator = self.parent().prm['pref']["general"]["csvSeparator"]
resProcTableAvailable = True
if self.prm['paradigm'] in [self.tr("Transformed Up-Down"),
self.tr("Weighted Up-Down"),
self.tr("Transformed Up-Down Limited"),
self.tr("Weighted Up-Down Limited"),
self.tr("PEST")]:
paradigm = 'adaptive'
elif self.prm['paradigm'] in [self.tr("Transformed Up-Down Interleaved"),
self.tr("Weighted Up-Down Interleaved")]:
paradigm = 'adaptive_interleaved'
elif self.prm['paradigm'] in [self.tr("Constant 1-Interval 2-Alternatives")]:
paradigm = 'constant1Interval2Alternatives'
elif self.prm['paradigm'] in [self.tr("Constant m-Intervals n-Alternatives")]:
paradigm = 'constantMIntervalsNAlternatives'
elif self.prm['paradigm'] in [self.tr("Multiple Constants 1-Interval 2-Alternatives")]:
paradigm = 'multipleConstants1Interval2Alternatives'
elif self.prm['paradigm'] in [self.tr("Multiple Constants m-Intervals n-Alternatives")]:
paradigm = 'multipleConstantsMIntervalsNAlternatives'
elif self.prm['paradigm'] in [self.tr("Constant 1-Pair Same/Different")]:
paradigm = 'constant1PairSD'
elif self.prm['paradigm'] in [self.tr("Multiple Constants 1-Pair Same/Different")]:
paradigm = 'multipleConstants1PairSD'
elif self.prm['paradigm'] in [self.tr("Multiple Constants ABX")]:
paradigm = 'multipleConstantsABX'
if self.prm['paradigm'] in ["UML", "PSI"]:
resProcTableAvailable = False
if resProcTableAvailable == True:
categoricalPlot(self, 'average', summaryResFilePath, winPlot, pdfPlot, paradigm, separator, None, self.prm)
def parseCustomCommandArguments(self, cmd):
for vr in self.pychovariables:
cmd = str.replace(cmd, vr, self.pychovariablesSubstitute[self.pychovariables.index(vr)])
return cmd
def playEndMessage(self):
idx = get_list_indices(self.prm['pref']['general']['endMessageFilesUse'], "\u2713")
idChosen = random.choice(idx)
msgSnd, fs = self.audioManager.loadWavFile(self.prm['pref']['general']['endMessageFiles'][idChosen], self.prm['pref']['general']['endMessageLevels'][idChosen], self.prm['allBlocks']['maxLevel'], 'Both')
self.playThread.playThreadedSound(msgSnd, fs, self.prm['allBlocks']['nBits'], self.prm['pref']['sound']['playCommand'], False, 'foo.wav')
class responseLight(QWidget):
def __init__(self, parent):
super(responseLight, self).__init__(parent)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding))
self.correctLightColor = self.parent().parent().prm["pref"]["resp_box"]["correctLightColor"]
self.incorrectLightColor = self.parent().parent().prm["pref"]["resp_box"]["incorrectLightColor"]
self.neutralLightColor = self.parent().parent().prm["pref"]["resp_box"]["neutralLightColor"]
self.offLightColor = self.parent().parent().prm["pref"]["resp_box"]["offLightColor"]
self.borderColor = Qt.black
self.lightColor = self.offLightColor#Qt.black
self.feedbackText = ""
self.responseLightType = self.tr("Light") #this is just for initialization purposes
self.rb = self.parent() #response box
self.cw = self.parent().parent() #control window
self.correctSmiley = QIcon.fromTheme("face-smile", QIcon(":/face-smile"))
self.incorrectSmiley = QIcon.fromTheme("face-sad", QIcon(":/face-sad"))
self.neutralSmiley = QIcon.fromTheme("face-plain", QIcon(":/face-plain"))
self.offSmiley = QIcon() #create just a null icon
self.feedbackSmiley = self.offSmiley
def giveFeedback(self, feedback):
currBlock = 'b'+ str(self.parent().parent().prm['currentBlock'])
self.responseLightType = self.parent().parent().prm[currBlock]['responseLightType']
self.setStatus(feedback)
self.parent().repaint()
QApplication.processEvents()
time.sleep(self.parent().parent().prm[currBlock]['responseLightDuration']/1000)
self.setStatus('off')
self.parent().repaint()
QApplication.processEvents()
def setStatus(self, status):
self.correctLightColor = self.cw.prm["pref"]["resp_box"]["correctLightColor"]
self.incorrectLightColor = self.cw.prm["pref"]["resp_box"]["incorrectLightColor"]
self.neutralLightColor = self.cw.prm["pref"]["resp_box"]["neutralLightColor"]
self.offLightColor = self.cw.prm["pref"]["resp_box"]["offLightColor"]
if self.responseLightType in [self.tr("Light"), self.tr("Light & Text"), self.tr("Light & Smiley"), self.tr("Light & Text & Smiley")]:
if status == 'correct':
self.lightColor = self.correctLightColor#Qt.green
elif status == 'incorrect':
self.lightColor = self.incorrectLightColor #Qt.red
elif status == 'neutral':
self.lightColor = self.neutralLightColor #Qt.white
elif status == 'off':
self.lightColor = self.offLightColor #Qt.black
if self.responseLightType in [self.tr("Text"), self.tr("Light & Text"), self.tr("Text & Smiley"), self.tr("Light & Text & Smiley")]:
if status == 'correct':
if self.cw.prm["pref"]["resp_box"]["correctTextFeedbackUserSet"] == True:
self.feedbackText = self.cw.prm["pref"]["resp_box"]["userSetCorrectTextFeedback"]
else:
self.feedbackText = self.cw.prm['rbTrans'].translate('rb', self.cw.prm["pref"]["resp_box"]["correctTextFeedback"])
self.penColor = self.cw.prm["pref"]["resp_box"]["correctTextColor"]
elif status == 'incorrect':
if self.cw.prm["pref"]["resp_box"]["incorrectTextFeedbackUserSet"] == True:
self.feedbackText = self.cw.prm["pref"]["resp_box"]["userSetIncorrectTextFeedback"]
else:
self.feedbackText = self.cw.prm['rbTrans'].translate('rb', self.cw.prm["pref"]["resp_box"]["incorrectTextFeedback"])
self.penColor = self.cw.prm["pref"]["resp_box"]["incorrectTextColor"]
elif status == 'neutral':
if self.cw.prm["pref"]["resp_box"]["neutralTextFeedbackUserSet"] == True:
self.feedbackText = self.cw.prm["pref"]["resp_box"]["userSetNeutralTextFeedback"]
else:
self.feedbackText = self.cw.prm['rbTrans'].translate('rb', self.cw.prm["pref"]["resp_box"]["neutralTextFeedback"])
self.penColor = self.cw.prm["pref"]["resp_box"]["neutralTextColor"]
elif status == 'off':
if self.cw.prm["pref"]["resp_box"]["offTextFeedbackUserSet"] == True:
self.feedbackText = self.cw.prm["pref"]["resp_box"]["userSetOffTextFeedback"]
else:
self.feedbackText = self.cw.prm['rbTrans'].translate('rb', self.cw.prm["pref"]["resp_box"]["offTextFeedback"])
self.penColor = self.cw.prm["pref"]["resp_box"]["offTextColor"]
if self.responseLightType in [self.tr("Smiley"), self.tr("Light & Smiley"), self.tr("Text & Smiley"), self.tr("Light & Text & Smiley")]:
if status == 'correct':
self.feedbackSmiley = self.correctSmiley
elif status == 'incorrect':
self.feedbackSmiley = self.incorrectSmiley
elif status == 'neutral':
self.feedbackSmiley = self.neutralSmiley
elif status == 'off':
self.feedbackSmiley = self.offSmiley
def paintEvent(self, event=None):
if self.responseLightType == self.tr("Light"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setPen(self.borderColor)
painter.setBrush(self.lightColor)
painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
elif self.responseLightType == self.tr("Text"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setBrush(self.offLightColor)
painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
r = QtCore.QRectF(0,0,self.width(),self.height())
painter.setPen(self.penColor)
qfont = QFont()
qfont.fromString(self.cw.prm["pref"]["resp_box"]["responseLightFont"])
painter.setFont(qfont)
painter.drawText(r, Qt.AlignCenter, self.feedbackText)
elif self.responseLightType == self.tr("Smiley"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setBrush(self.offLightColor)
rect = painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
rect = QRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
self.feedbackSmiley.paint(painter, rect, Qt.AlignCenter)
elif self.responseLightType == self.tr("Light & Text"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setPen(self.borderColor)
painter.setBrush(self.lightColor)
painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
r = QtCore.QRectF(0,0,self.width(),self.height())
painter.setPen(self.penColor)
qfont = QFont()
qfont.fromString(self.cw.prm["pref"]["resp_box"]["responseLightFont"])
painter.setFont(qfont)
painter.drawText(r, Qt.AlignCenter, self.feedbackText)
elif self.responseLightType == self.tr("Light & Smiley"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setBrush(self.lightColor)
rect = painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
rect = QRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
self.feedbackSmiley.paint(painter, rect, Qt.AlignCenter)
elif self.responseLightType == self.tr("Text & Smiley"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setBrush(self.offLightColor)
rect = painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
rectRight = QRect(self.width()/60, self.height()/60, self.width()+self.width()/2, self.height())
self.feedbackSmiley.paint(painter, rectRight, Qt.AlignCenter)
rectLeft = QRect(self.width()/60, self.height()/60, self.width()-self.width()/2, self.height())
self.feedbackSmiley.paint(painter, rectLeft, Qt.AlignCenter)
r = QtCore.QRectF(0,0,self.width(),self.height())
painter.setPen(self.penColor)
qfont = QFont()
qfont.fromString(self.cw.prm["pref"]["resp_box"]["responseLightFont"])
painter.setFont(qfont)
painter.drawText(r, Qt.AlignCenter, self.feedbackText)
elif self.responseLightType == self.tr("Light & Text & Smiley"):
painter = QPainter(self)
painter.setViewport(0,0,self.width(),self.height())
painter.setBrush(self.lightColor)
rect = painter.drawRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height())
rectRight = QRect(self.width()/60, self.height()/60, self.width()+self.width()/2, self.height())
self.feedbackSmiley.paint(painter, rectRight, Qt.AlignCenter)
rectLeft = QRect(self.width()/60, self.height()/60, self.width()-self.width()/2, self.height())
self.feedbackSmiley.paint(painter, rectLeft, Qt.AlignCenter)
r = QtCore.QRectF(0,0,self.width(),self.height())
painter.setPen(self.penColor)
qfont = QFont()
qfont.fromString(self.cw.prm["pref"]["resp_box"]["responseLightFont"])
painter.setFont(qfont)
painter.drawText(r, Qt.AlignCenter, self.feedbackText)
class intervalLight(QFrame):
def __init__(self, parent):
QFrame.__init__(self, parent)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self.borderColor = Qt.red
self.lightColor = Qt.black
def setStatus(self, status):
if status == 'on':
self.lightColor = Qt.white
elif status == 'off':
self.lightColor = Qt.black
self.parent().repaint()
QApplication.processEvents()
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setViewport(0, 0, self.width(),self.height())
painter.setPen(self.borderColor)
painter.setBrush(self.lightColor)
painter.fillRect(self.width()/60, self.height()/60, self.width()-self.width()/30, self.height(), self.lightColor)
class threadedPlayer(QThread):
def __init__(self, parent):
QThread.__init__(self, parent)
def playThreadedSound(self, sound, sampRate, bits, cmd, writewav, fName):
self.sound = sound
self.sampRate = sampRate
self.bits = bits
self.cmd = cmd
self.writewav = writewav
self.fName = fName
self.start()
self.audioManager = self.parent().audioManager
def run(self):
self.audioManager.playSound(self.sound, self.sampRate, self.bits, self.cmd, self.writewav, self.fName)
class commandExecuter(QThread):
def __init__(self, parent):
QThread.__init__(self, parent)
def executeCommand(self, cmd):
self.cmd = cmd
self.start()
def run(self):
for i in range(len(self.cmd)):
os.system(self.cmd[i])
class emailSender(QThread):
def __init__(self, parent):
QThread.__init__(self, parent)
def sendEmail(self, subject='', body='', attachments=[]):
self.subject = subject
self.body = body
self.attachments = attachments
self.start()
def run(self):
experimenterIdx = self.parent().prm['experimenter']['experimenter_id'].index(self.parent().prm['allBlocks']['currentExperimenter'])
decoded_passwd = bytes(self.parent().prm['pref']['email']['fromPassword'], "utf-8")
decoded_passwd = base64.b64decode(decoded_passwd)
decoded_passwd = str(decoded_passwd, "utf-8")
msg = MIMEMultipart()
msg["From"] = self.parent().prm['pref']['email']['fromUsername']
msg["To"] = self.parent().prm['experimenter']['experimenter_email'][experimenterIdx]
msg["Subject"] = self.subject
part1 = MIMEText(self.body, 'plain')
msg.attach(part1)
for item in self.attachments:
part = MIMEBase('application', "octet-stream")
filePath = item
part.set_payload(open(filePath, "rb").read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(filePath))
msg.attach(part)
if checkEmailValid(msg["To"]) == False:
errMsg = self.parent().tr("Experimenter {} e-mail's address {} not valid \n Please specify a valid address for the current experimenter \n in the Edit -> Experimenters dialog".format(self.parent().parent().prm['experimenter']['experimenter_id'][experimenterIdx], msg["To"]))
print(errMsg, file=sys.stderr)
return
elif checkUsernameValid(msg["From"]) == False:
errMsg = self.parent().tr("username invalid")
print(errMsg, file=sys.stderr)
return
elif checkServerValid(self.parent().prm["pref"]["email"]['SMTPServer']) == False:
errMsg = self.parent().tr("SMTP server name invalid")
print(errMsg, file=sys.stderr)
return
if self.parent().prm["pref"]["email"]["serverRequiresAuthentication"] == True:
if self.parent().prm["pref"]["email"]['SMTPServerSecurity'] == "TLS/SSL (a)":
try:
server = smtplib.SMTP_SSL(self.parent().prm["pref"]["email"]['SMTPServer'], self.parent().prm["pref"]["email"]['SMTPServerPort'])
except Exception as ex:
errMsg = self.parent().tr("Something went wrong, try to change server settings \n {}".format(ex))
print(errMsg, file=sys.stderr)
return
elif self.parent().prm["pref"]["email"]['SMTPServerSecurity'] == "TLS/SSL (b)":
try:
server = smtplib.SMTP(self.parent().prm["pref"]["email"]['SMTPServer'], self.parent().prm["pref"]["email"]['SMTPServerPort'])
server.ehlo()
server.starttls()
except Exception as ex:
errMsg = self.parent().tr("Something went wrong, try to change server settings \n {}".format(ex))
print(errMsg, file=sys.stderr)
return
elif self.parent().prm["pref"]["email"]['SMTPServerSecurity'] == "none":
try:
server = smtplib.SMTP(self.parent().prm["pref"]["email"]['SMTPServer'], self.parent().prm["pref"]["email"]['SMTPServerPort'])
except Exception as ex:
errMsg = self.parent().tr("Something went wrong, try to change server settings \n {}".format(ex))
print(errMsg, file=sys.stderr)
return
# now attempt login
try:
server.login(self.parent().prm['pref']['email']['fromUsername'], decoded_passwd)
except Exception as ex:
errMsg = self.parent().tr("Something went wrong, try to change server settings \n {}".format(ex))
print(errMsg, file=sys.stderr)
return
else:
try:
server = smtplib.SMTP(self.parent().prm["pref"]["email"]['SMTPServer'], self.parent().prm["pref"]["email"]['SMTPServerPort'])
except Exception as ex:
errMsg = self.parent().tr("Something went wrong, try to change server settings \n {}".format(ex))
print(errMsg, file=sys.stderr)
return
try:
server.sendmail(msg["From"], msg["To"], msg.as_string())
print('e-mail sent successfully', file=sys.stdout)
except Exception as ex:
errMsg = self.parent().tr("Something went wrong, try to change server settings \n {}".format(ex))
print(errMsg, file=sys.stderr)
return
class ValidDigitSequence(QValidator):
def __init__(self, parent):
QValidator.__init__(self, parent)
def validate(self, s, pos):
self.regexp = QRegExp("[0-9]+")
if s == "":
return (QValidator.Intermediate, s, pos)
elif not self.regexp.exactMatch(s):
return (QValidator.Invalid, s, pos)
else:
return (QValidator.Acceptable,s, pos)
| gpl-3.0 |
rgommers/statsmodels | statsmodels/tsa/tests/test_stattools.py | 1 | 11576 | from statsmodels.compat.python import lrange
from statsmodels.tsa.stattools import (adfuller, acf, pacf_ols, pacf_yw,
pacf, grangercausalitytests,
coint, acovf,
arma_order_select_ic)
from statsmodels.tsa.base.datetools import dates_from_range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
dec, assert_)
from numpy import genfromtxt#, concatenate
from statsmodels.datasets import macrodata, sunspots
from pandas import Series, Index, DataFrame
import os
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
class CheckADF(object):
"""
Test Augmented Dickey-Fuller
Test values taken from Stata.
"""
levels = ['1%', '5%', '10%']
data = macrodata.load()
x = data.data['realgdp']
y = data.data['infl']
def test_teststat(self):
assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
def test_pvalue(self):
assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
def test_critvalues(self):
critvalues = [self.res1[4][lev] for lev in self.levels]
assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
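# Indexing note for the assertions above: with autolag=None, adfuller returns
# (test statistic, p-value, used lag, nobs, dict of critical values), so res1[0]
# is the statistic, res1[1] the p-value and res1[4][level] a critical value.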
class TestADFConstant(CheckADF):
"""
Dickey-Fuller test for unit root
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="c", autolag=None,
maxlag=4)
self.teststat = .97505319
self.pvalue = .99399563
self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
"""
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="ct", autolag=None,
maxlag=4)
self.teststat = -1.8566374
self.pvalue = .67682968
self.critvalues = [-4.007, -3.437, -3.137]
#class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
#TODO: get test values from R?
class TestADFNoConstant(CheckADF):
"""
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="nc", autolag=None,
maxlag=4)
self.teststat = 3.5227498
self.pvalue = .99999 # Stata does not return a p-value for noconstant.
# Tau^max in MacKinnon (1994) is missing, so it is
# assumed that its right-tail is well-behaved
self.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="c", autolag=None,
maxlag=1)
self.teststat = -4.3346988
self.pvalue = .00038661
self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="ct", autolag=None,
maxlag=1)
self.teststat = -4.425093
self.pvalue = .00199633
self.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="nc", autolag=None,
maxlag=1)
self.teststat = -2.4511596
self.pvalue = 0.013747 # Stata does not return a p-value for noconstant
# this value is just taken from our results
self.critvalues = [-2.587,-1.950,-1.617]
class CheckCorrGram(object):
"""
Set up for ACF, PACF tests.
"""
data = macrodata.load()
x = data.data['realgdp']
filename = os.path.dirname(os.path.abspath(__file__))+\
"/results/results_corrgram.csv"
results = genfromtxt(open(filename, "rb"), delimiter=",", names=True, dtype=float)
#not needed: add 1. for lag zero
#self.results['acvar'] = np.concatenate(([1.], self.results['acvar']))
class TestACF(CheckCorrGram):
"""
Test Autocorrelation Function
"""
def __init__(self):
self.acf = self.results['acvar']
#self.acf = np.concatenate(([1.], self.acf))
self.qstat = self.results['Q1']
self.res1 = acf(self.x, nlags=40, qstat=True, alpha=.05)
self.confint_res = self.results[['acvar_lb','acvar_ub']].view((float,
2))
def test_acf(self):
assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
def test_confint(self):
centered = self.res1[1] - self.res1[1].mean(1)[:,None]
assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
def test_qstat(self):
assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
# 3 decimal places because of stata rounding
# def pvalue(self):
# pass
#NOTE: shouldn't need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
"""
Test Autocorrelation Function using FFT
"""
def __init__(self):
self.acf = self.results['acvarfft']
self.qstat = self.results['Q1']
self.res1 = acf(self.x, nlags=40, qstat=True, fft=True)
def test_acf(self):
assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
def test_qstat(self):
#todo: why is res1/qstat one element short?
assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
def __init__(self):
self.pacfols = self.results['PACOLS']
self.pacfyw = self.results['PACYW']
def test_ols(self):
pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols")
assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
centered = confint - confint.mean(1)[:,None]
# from edited Stata ado file
res = [[-.1375625, .1375625]] * 40
assert_almost_equal(centered[1:41], res, DECIMAL_6)
def test_yw(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
def test_ld(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
pacfld = pacf(self.x, nlags=40, method="ldb")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
pacfyw = pacf(self.x, nlags=40, method="yw")
pacfld = pacf(self.x, nlags=40, method="ldu")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class CheckCoint(object):
"""
Test Cointegration Test Results for 2-variable system
Test values taken from Stata
"""
levels = ['1%', '5%', '10%']
data = macrodata.load()
y1 = data.data['realcons']
y2 = data.data['realgdp']
def test_tstat(self):
assert_almost_equal(self.coint_t,self.teststat, DECIMAL_4)
class TestCoint_t(CheckCoint):
"""
Get AR(1) parameter on residuals
"""
def __init__(self):
self.coint_t = coint(self.y1, self.y2, regression ="c")[0]
self.teststat = -1.8208817
class TestGrangerCausality(object):
def test_grangercausality(self):
# some example data
mdata = macrodata.load().data
mdata = mdata[['realgdp', 'realcons']]
data = mdata.view((float, 2))
data = np.diff(np.log(data), axis=0)
#R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] # f_test
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'], decimal=7)
def test_granger_fails_on_nobs_check(self):
# Test that if maxlag is too large, Granger Test raises a clear error.
X = np.random.rand(10, 2)
grangercausalitytests(X, 2, verbose=False) # This should pass.
assert_raises(ValueError, grangercausalitytests, X, 3, verbose=False)
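# As exercised above, grangercausalitytests returns a dict keyed by lag; each
# value's first element is a dict of test results, and 'ssr_ftest' unpacks as
# (F statistic, p-value, df_denom, df_num), the same order as the lmtest r_result.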
def test_pandasacovf():
s = Series(lrange(1, 11))
assert_almost_equal(acovf(s), acovf(s.values))
def test_acovf2d():
dta = sunspots.load_pandas().data
dta.index = Index(dates_from_range('1700', '2008'))
del dta["YEAR"]
res = acovf(dta)
assert_equal(res, acovf(dta.values))
X = np.random.random((10,2))
assert_raises(ValueError, acovf, X)
def test_acovf_fft_vs_convolution():
np.random.seed(1)
q = np.random.normal(size=100)
for demean in [True, False]:
for unbiased in [True, False]:
F1 = acovf(q, demean=demean, unbiased=unbiased, fft=True)
F2 = acovf(q, demean=demean, unbiased=unbiased, fft=False)
assert_almost_equal(F1, F2, decimal=7)
@dec.slow
def test_arma_order_select_ic():
# smoke test, assumes info-criteria are right
from statsmodels.tsa.arima_process import arma_generate_sample
import statsmodels.api as sm
arparams = np.array([.75, -.25])
maparams = np.array([.65, .35])
arparams = np.r_[1, -arparams]
maparam = np.r_[1, maparams]  # note: unused below; arma_generate_sample receives the original maparams
nobs = 250
np.random.seed(2014)
y = arma_generate_sample(arparams, maparams, nobs)
res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
# regression tests in case we change algorithm to minic in sas
aic_x = np.array([[ np.nan, 552.7342255 , 484.29687843],
[ 562.10924262, 485.5197969 , 480.32858497],
[ 507.04581344, 482.91065829, 481.91926034],
[ 484.03995962, 482.14868032, 483.86378955],
[ 481.8849479 , 483.8377379 , 485.83756612]])
bic_x = np.array([[ np.nan, 559.77714733, 494.86126118],
[ 569.15216446, 496.08417966, 494.41442864],
[ 517.61019619, 496.99650196, 499.52656493],
[ 498.12580329, 499.75598491, 504.99255506],
[ 499.49225249, 504.96650341, 510.48779255]])
aic = DataFrame(aic_x , index=lrange(5), columns=lrange(3))
bic = DataFrame(bic_x , index=lrange(5), columns=lrange(3))
assert_almost_equal(res.aic.values, aic.values, 5)
assert_almost_equal(res.bic.values, bic.values, 5)
assert_equal(res.aic_min_order, (1, 2))
assert_equal(res.bic_min_order, (1, 2))
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_(res.bic.index.equals(bic.index))
assert_(res.bic.columns.equals(bic.columns))
res = arma_order_select_ic(y, ic='aic', trend='nc')
assert_almost_equal(res.aic.values, aic.values, 5)
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
# this should trigger an SVD convergence failure, smoke test that it
# returns, likely platform dependent failure...
y = np.array([ 0.86074377817203640006, 0.85316549067906921611,
0.87104653774363305363, 0.60692382068987393851,
0.69225941967301307667, 0.73336177248909339976,
0.03661329261479619179, 0.15693067239962379955,
0.12777403512447857437, -0.27531446294481976 ,
-0.24198139631653581283, -0.23903317951236391359,
-0.26000241325906497947, -0.21282920015519238288,
-0.15943768324388354896, 0.25169301564268781179,
0.1762305709151877342 , 0.12678133368791388857,
0.89755829086753169399, 0.82667068795350151511])
res = arma_order_select_ic(y)
if __name__=="__main__":
import nose
# nose.runmodule(argv=[__file__, '-vvs','-x','-pdb'], exit=False)
import numpy as np
np.testing.run_module_suite()
| bsd-3-clause |
HWal/paparazzi | sw/airborne/test/ahrs/ahrs_utils.py | 86 | 4923 | #! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
plt.ylabel('degres')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
plt.ylabel('degres/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
plt.ylabel('degres/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
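# Illustrative usage sketch (not part of the original module; the AHRS type and
# build option below are hypothetical placeholders):
#
#   sim_res = run_simulation("ICQ", ["SOME_BUILD_OPT=1"], 1)
#   plot_simulation_results(True, 'b-', 'ICQ', sim_res)
#   show_plot()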
| gpl-2.0 |
rgommers/statsmodels | statsmodels/sandbox/examples/example_gam.py | 33 | 2343 | '''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
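# Data-generating model built above: y is standard normal noise plus
# z = 2 * standardize(standardize(f1(x1)) + standardize(f2(x2))),
# i.e. a smooth additive signal in x1 and x2.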
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print("binomial")
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
if example == 3:
print("Poisson")
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |
alexis-roche/register | doc/conf.py | 5 | 6641 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# sampledoc documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 3 12:40:24 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# Get project related strings. Please do not change this line to use
# execfile because execfile is not available in Python 3
_info_fname = os.path.join('..', 'nipy', 'info.py')
rel = {}
exec(open(_info_fname, 'rt').read(), {}, rel)
# Import support for ipython console session syntax highlighting (lives
# in the sphinxext directory defined above)
import ipython_console_highlighting
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'numpy_ext.numpydoc',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives', # needed for above
]
# Autosummary on
autosummary_generate=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'nipy'
#copyright = ':ref:`2005-2010, Neuroimaging in Python team. <nipy-software-license>`'
copyright = '2005-2013, Neuroimaging in Python team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = rel['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directories, that shouldn't
# be searched for source files.
# exclude_trees = []
# what to put into API doc (just class doc, just init, or both)
autoclass_content = 'class'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
#
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'nipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'NIPY Documentation'
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = project
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('documentation', 'nipy.tex', 'Neuroimaging in Python Documentation',
ur'Neuroimaging in Python team.','manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}
\usepackage{amssymb}
% Uncomment these two if needed
%\usepackage{amsfonts}
%\usepackage{txfonts}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
| bsd-3-clause |
eteq/bokeh | examples/plotting/server/glucose.py | 18 | 1612 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import pandas as pd
from bokeh.sampledata.glucose import data
from bokeh.plotting import figure, show, output_server, vplot
output_server("glucose")
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p1 = figure(x_axis_type="datetime", tools=TOOLS)
p1.line(data.index, data['glucose'], color='red', legend='glucose')
p1.line(data.index, data['isig'], color='blue', legend='isig')
p1.title = "Glucose Measurements"
p1.xaxis.axis_label = 'Date'
p1.yaxis.axis_label = 'Value'
day = data.ix['2010-10-06']
highs = day[day['glucose'] > 180]
lows = day[day['glucose'] < 80]
p2 = figure(x_axis_type="datetime", tools=TOOLS)
p2.line(day.index.to_series(), day['glucose'],
line_color="gray", line_dash="4 4", line_width=1, legend="glucose")
p2.circle(highs.index, highs['glucose'], size=6, color='tomato', legend="high")
p2.circle(lows.index, lows['glucose'], size=6, color='navy', legend="low")
p2.title = "Glucose Range"
p2.xgrid[0].grid_line_color=None
p2.ygrid[0].grid_line_alpha=0.5
p2.xaxis.axis_label = 'Time'
p2.yaxis.axis_label = 'Value'
data['inrange'] = (data['glucose'] < 180) & (data['glucose'] > 80)
window = 30.5*288 #288 is average number of samples in a month
inrange = pd.rolling_sum(data.inrange, window)
inrange = inrange.dropna()
inrange = inrange/float(window)
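# Dividing the rolling count by the window length converts it into the proportion
# of samples in range over the trailing window (about one month) plotted below.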
p3 = figure(x_axis_type="datetime", tools=TOOLS)
p3.line(inrange.index, inrange, line_color="navy")
p3.title = "Glucose In-Range Rolling Sum"
p3.xaxis.axis_label = 'Date'
p3.yaxis.axis_label = 'Proportion In-Range'
show(vplot(p1,p2,p3))
| bsd-3-clause |
pygeo/pycmbs | pycmbs/__init__.py | 1 | 1379 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import os
import json
ppath = os.path.dirname(os.path.realpath(__file__))
__name__ = "pycmbs"
"""The project name."""
__author__ = "Alexander Loew"
"""The primary author of pyCMBS."""
__institute__ = "Max-Planck-Institute for Meteorology (MPI-M)"
__copyright__ = "Copyright (c) 2011-2015 Alexander Loew"
"""The copyright holder of pyCMBS."""
__license__ = "MIT License, see LICENSE.md for details"
"""The license governing the use and distribution of pyCMBS."""
__url__ = "https://github.com/pygeo/pycmbs"
"""The URL for pyCMBS's homepage."""
__date__ = "2014-06-05"
"""The release date of this version of pyCMBS."""
# note that a proper version number is required as PIP otherwise can not
# detect the file
__version__ = json.load(open(ppath + os.sep + 'version.json'))
"""Version number of pyCMBS."""
__email__ = "[email protected]"
# set globally plotting backend
try:
import matplotlib
matplotlib.use('Agg')
except:
print('Some problem with import of matplotlib happened')
try:
from mapping import MultipleMap, SingleMap, MapPlotGeneric
except:
pass
# set automatically directory where pycmbs is located
os.environ.update({'PYCMBSPATH': os.path.dirname(os.path.realpath(__file__))})
| mit |
AdaptivePELE/AdaptivePELE | AdaptivePELE/analysis/count_clusters.py | 1 | 7849 | import numpy as np
import os
import mdtraj as md
import glob
import matplotlib.pyplot as plt
import argparse
def obtainLigandIndexes(trajectory, ligand):
"""
Extract the indexes for the ligand in the trajectory
:param trajectory: mdtraj trajectory
:param ligand: name of the ligand
:return: list of the atom indexes of the heavy atoms of the ligand
"""
residueIndexes = []
for residue in trajectory.topology.residues:
if residue.name == ligand:
for atom in residue.atoms:
if "H" not in atom.name:
residueIndexes.append(atom.index)
return residueIndexes
def calculate_norm(member1, member2, clust):
"""
Calculates the Euclidean distance between two mass centers and pairs it with the cluster index
:param member1: coords of the first mass center
:param member2: coords of the second mass center
:param clust: cluster that is being processed
:return: tuple of (distance between the mass centers, cluster)
"""
return (np.linalg.norm(member1 - member2), clust)
def extrac_traj_num(traj):
"""
Extracts the number of the trajectory
:param traj: name of the trajectory (str)
:return: number of the trajectory (int)
"""
num = traj.split(".")[0].split("_")[-1]
return int(num)
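# e.g. extrac_traj_num("output/trajectory_8.pdb") -> 8 (path shown is illustrative)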
def load_cluster_data(clusters_file):
"""
Loads into a dictionary the coords of each cluster center
:param clusters_file: name of the MSM clusters file
:return: dict with the clusters as keys and the coords as values
"""
clusters_data = {}
with open(clusters_file, "r") as inputfile:
for line in inputfile:
line = line.split()
clusters_data[int(line[1])] = [float(line[-6]), float(line[-5]), float(line[-4])]
return clusters_data
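# Assumed line layout in the clusters file, inferred from the indices used above:
# whitespace-separated columns with the cluster id in the 2nd field and the x, y, z
# center coordinates in the 6th-, 5th- and 4th-to-last fields.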
def parseArguments():
"""
Parse command line arguments
:returns: str, str, str, str, str, str, bool, bool -- path with the MSM clusters.pdb file,
path to the reference file, name of the ligand, path to the trajectories,
string that matches the trajectories names, path to the topology if needed,
whether to plot the clustering distances, whether to plot the trajectory data
"""
desc = """ Script that plots the MSM clusters that each trajectory has and the distance
of the clusters to the reference position """
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-clust", required=True, help="Path with the MSM clusters.pdb file")
parser.add_argument("-ref", required=True, help="Path to the reference file")
parser.add_argument("-lig", required=True, help="Name of the Ligand")
parser.add_argument("-dat", required=True, help="Path to the trajectory data")
parser.add_argument("-name", default="traj*", help="Template that matches the trajectory names")
parser.add_argument("-top", default=None, help="Path to the topology file if needed")
parser.add_argument("-dists", action="store_true", default=False, help="whether to plot the clustering distances or not")
parser.add_argument("-clustdat", action="store_true", default=False, help="whether to plot the Trajectory data or not")
args = parser.parse_args()
return args.clust, args.ref, args.lig, args.dat, args.name, args.top, args.dists, args.clustdat
def main(cluster_file, reference, ligand, data_folder, traj_template, topology, plot_clust_dist, plot_traj):
# Variable declaration
clusters_map = {} # Dict with the cluster equivalences
clusters_counts = {} # Dict with the number of times that each cluster appears
newClusters = {} # Dict with the data of the new clusters
dataFramedistances = {"Clusters": [], "Distances": []}
trajdataFrame = {"Frame": [], "Trajectory": [], "Cluster": [], "Trajectory_names": []}
bar_dataframe = {"Cluster": [], "Counts": []}
# Loading data from cluster
clusters_data = load_cluster_data(cluster_file)
# Sort the trajectories to process
trajectories = glob.glob(os.path.join(data_folder, traj_template))
trajectories.sort(key=lambda x: extrac_traj_num(x))
# Load the data from the reference
reference_traj = md.load(reference)
reference_ligand = reference_traj.atom_slice(obtainLigandIndexes(reference_traj, ligand))
ligand_center = md.compute_center_of_mass(reference_ligand)*10 # multiplies x10 to change from nm to A
# Sort Clusters according to the distance to the reference
clusters_distance = [calculate_norm(np.asarray(clusters_data[x]), ligand_center, x) for x in clusters_data]
clusters_distance.sort()
# Initialize the dictionaries
for i, element in enumerate(clusters_distance):
newClusters[i] = clusters_data[element[1]]
clusters_map[i] = element[1]
clusters_counts[i] = 0
dataFramedistances["Clusters"].append(i)
dataFramedistances["Distances"].append(element[0])
# Load trajectory data if necessary
if plot_traj:
for i, traj in enumerate(trajectories):
traj_num = extrac_traj_num(traj)
trajdataFrame["Frame"].append([])
trajdataFrame["Trajectory"].append([])
trajdataFrame["Cluster"].append([])
print("Starting with trajectory: %s" % traj_num)
if topology:
traj_obj = md.load(traj, top=topology)
else:
traj_obj = md.load(traj)
ligandtraj = traj_obj.atom_slice(obtainLigandIndexes(traj_obj, ligand))
for j, frame in enumerate(ligandtraj):
center = md.compute_center_of_mass(frame)*10 # multiplies x10 to change from nm to A
closestClust = min(map(lambda x: calculate_norm(center[0], newClusters[x], x), newClusters))[1]
clusters_counts[closestClust] += 1
trajdataFrame["Frame"][i].append(j)
trajdataFrame["Trajectory"][i].append(int(traj_num))
trajdataFrame["Cluster"][i].append(int(closestClust))
trajdataFrame["Trajectory_names"].append("Traj_%s" % traj_num)
for key in clusters_counts:
bar_dataframe["Cluster"].append(key)
bar_dataframe["Counts"].append(clusters_counts[key])
plt.style.use('ggplot')
with open("Cluster_equivalences.txt", "w") as clustequi:
clustequi.write("File with the equivalences between the original clusters and the new clusters\nNew Cluster Old Cluster\n")
for key in clusters_map:
clustequi.write("%s %s\n" % (key, clusters_map[key]))
if plot_clust_dist:
plt.plot(dataFramedistances["Clusters"], dataFramedistances["Distances"])
plt.xlabel("Clusters")
plt.ylabel("Distance to Reference")
plt.savefig("clusters_distances.png")
if plot_traj:
plt.figure(1)
plt.subplot(1, 2, 1)
cmap = plt.get_cmap("viridis")
data = zip(trajdataFrame["Trajectory"], cmap.colors[::int(len(cmap.colors)/len(trajdataFrame["Trajectory"]))])
traj_count = 0
for trajectory, color in data:
plt.scatter(x=trajdataFrame["Cluster"][traj_count], y=trajectory, c=color)
traj_count += 1
plt.xlabel('Clusters')
plt.ylabel('Trajectory')
# plt.yticks([i for i in range(len(trajdataFrame["Trajectory_names"]))], trajdataFrame["Trajectory_names"])
plt.subplot(1, 2, 2)
plt.bar(x=bar_dataframe["Cluster"], height=bar_dataframe["Counts"], color=cmap.colors)
plt.xlabel('Clusters')
plt.ylabel('Counts')
plt.savefig("clusters_trajectory.png")
if __name__ == "__main__":
cluster_file, reference, ligand, data_folder, traj_template, topology, plot_clust_dist, plot_traj = parseArguments()
main(cluster_file, reference, ligand, data_folder, traj_template, topology, plot_clust_dist, plot_traj)
| mit |
microsoft/LightGBM | python-package/lightgbm/plotting.py | 1 | 26277 | # coding: utf-8
"""Plotting library."""
from copy import deepcopy
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from .basic import Booster, _log_warning
from .compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED
from .sklearn import LGBMModel
def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> None:
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError(f"{obj_name} must be a tuple of 2 elements.")
def _float2str(value: float, precision: Optional[int] = None) -> str:
return (f"{value:.{precision}f}"
if precision is not None and not isinstance(value, str)
else str(value))
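# For reference: _float2str(3.14159, 2) == "3.14" and _float2str(7, None) == "7".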
def plot_importance(
booster: Union[Booster, LGBMModel],
ax=None,
height: float = 0.2,
xlim: Optional[Tuple[float, float]] = None,
ylim: Optional[Tuple[float, float]] = None,
title: Optional[str] = 'Feature importance',
xlabel: Optional[str] = 'Feature importance',
ylabel: Optional[str] = 'Features',
importance_type: str = 'split',
max_num_features: Optional[int] = None,
ignore_zero: bool = True,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
grid: bool = True,
precision: Optional[int] = 3,
**kwargs: Any
) -> Any:
"""Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Booster's feature_importance is empty.")
tuples = sorted(zip(feature_name, importance), key=lambda x: x[1])
if ignore_zero:
tuples = [x for x in tuples if x[1] > 0]
if max_num_features is not None and max_num_features > 0:
tuples = tuples[-max_num_features:]
labels, values = zip(*tuples)
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
for x, y in zip(values, ylocs):
ax.text(x + 1, y,
_float2str(x, precision) if importance_type == 'gain' else x,
va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
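# Hypothetical usage sketch for plot_importance() (illustrative only): assumes
# lightgbm is installed with its scikit-learn wrapper and numpy is available;
# the synthetic data, estimator settings and feature count below are assumptions.
def _example_plot_importance():
    """Minimal sketch of plotting feature importances from a fitted model."""
    import numpy as np
    from .sklearn import LGBMRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(500, 4)
    y = 3 * X[:, 0] + X[:, 1] + rng.rand(500)
    reg = LGBMRegressor(n_estimators=10).fit(X, y)
    # 'split' counts how often each feature is used; 'gain' sums its split gains
    return plot_importance(reg, importance_type='split', max_num_features=4)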
def plot_split_value_histogram(
booster: Union[Booster, LGBMModel],
feature: Union[int, str],
bins: Union[int, str, None] = None,
ax=None,
width_coef: float = 0.8,
xlim: Optional[Tuple[float, float]] = None,
ylim: Optional[Tuple[float, float]] = None,
title: Optional[str] = 'Split value histogram for feature with @index/name@ @feature@',
xlabel: Optional[str] = 'Feature split value',
ylabel: Optional[str] = 'Count',
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
grid: bool = True,
**kwargs: Any
) -> Any:
"""Plot split value histogram for the specified feature of the model.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance of which feature split value histogram should be plotted.
feature : int or string
The feature name or index the histogram is plotted for.
If int, interpreted as index.
If string, interpreted as name.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
width_coef : float, optional (default=0.8)
Coefficient for histogram bar width.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Split value histogram for feature with @index/name@ @feature@")
Axes title.
If None, title is disabled.
@feature@ placeholder can be used, and it will be replaced with the value of ``feature`` parameter.
@index/name@ placeholder can be used,
and it will be replaced with ``index`` word in case of ``int`` type ``feature`` parameter
or ``name`` word in case of ``string`` type ``feature`` parameter.
xlabel : string or None, optional (default="Feature split value")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Count")
Y-axis title label.
If None, title is disabled.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
**kwargs
Other parameters passed to ``ax.bar()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with specified model's feature split value histogram.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
else:
raise ImportError('You must install matplotlib and restart your session to plot split value histogram.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
hist, bins = booster.get_split_value_histogram(feature=feature, bins=bins, xgboost_style=False)
if np.count_nonzero(hist) == 0:
raise ValueError('Cannot plot split value histogram, '
f'because feature {feature} was not used in splitting')
width = width_coef * (bins[1] - bins[0])
centred = (bins[:-1] + bins[1:]) / 2
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.bar(centred, hist, align='center', width=width, **kwargs)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
range_result = bins[-1] - bins[0]
xlim = (bins[0] - range_result * 0.2, bins[-1] + range_result * 0.2)
ax.set_xlim(xlim)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (0, max(hist) * 1.1)
ax.set_ylim(ylim)
if title is not None:
title = title.replace('@feature@', str(feature))
title = title.replace('@index/name@', ('name' if isinstance(feature, str) else 'index'))
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
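# Hypothetical usage sketch for plot_split_value_histogram() (illustrative only):
# assumes the same lightgbm/numpy setup as above; the feature index and data are
# assumptions, and the chosen feature must actually be used in at least one split.
def _example_plot_split_value_histogram():
    """Minimal sketch of plotting split values for one feature."""
    import numpy as np
    from .sklearn import LGBMRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(500, 3)
    y = 2 * X[:, 0] + rng.rand(500)
    reg = LGBMRegressor(n_estimators=10).fit(X, y)
    # the feature may be given by index (0) or by name (e.g. 'Column_0')
    return plot_split_value_histogram(reg, feature=0)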
def plot_metric(
booster: Union[Dict, LGBMModel],
metric: Optional[str] = None,
dataset_names: Optional[List[str]] = None,
ax=None,
xlim: Optional[Tuple[float, float]] = None,
ylim: Optional[Tuple[float, float]] = None,
title: Optional[str] = 'Metric during training',
xlabel: Optional[str] = 'Iterations',
ylabel: Optional[str] = 'auto',
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
grid: bool = True
) -> Any:
"""Plot one metric during training.
Parameters
----------
booster : dict or LGBMModel
Dictionary returned from ``lightgbm.train()`` or LGBMModel instance.
metric : string or None, optional (default=None)
The metric name to plot.
Only one metric supported because different metrics have various scales.
If None, first metric picked from dictionary (according to hashcode).
dataset_names : list of strings or None, optional (default=None)
List of the dataset names which are used to calculate metric to plot.
If None, all datasets are used.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Metric during training")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Iterations")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="auto")
Y-axis title label.
If 'auto', metric name is used.
If None, title is disabled.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
Returns
-------
ax : matplotlib.axes.Axes
The plot with metric's history over the training.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot metric.')
if isinstance(booster, LGBMModel):
eval_results = deepcopy(booster.evals_result_)
elif isinstance(booster, dict):
eval_results = deepcopy(booster)
else:
raise TypeError('booster must be dict or LGBMModel.')
num_data = len(eval_results)
if not num_data:
raise ValueError('eval results cannot be empty.')
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
if dataset_names is None:
dataset_names = iter(eval_results.keys())
elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names:
raise ValueError('dataset_names should be iterable and cannot be empty')
else:
dataset_names = iter(dataset_names)
name = next(dataset_names) # take one as sample
metrics_for_one = eval_results[name]
num_metric = len(metrics_for_one)
if metric is None:
if num_metric > 1:
_log_warning("More than one metric available, picking one to plot.")
metric, results = metrics_for_one.popitem()
else:
if metric not in metrics_for_one:
raise KeyError('No given metric in eval results.')
results = metrics_for_one[metric]
num_iteration, max_result, min_result = len(results), max(results), min(results)
x_ = range(num_iteration)
ax.plot(x_, results, label=name)
for name in dataset_names:
metrics_for_one = eval_results[name]
results = metrics_for_one[metric]
max_result, min_result = max(max(results), max_result), min(min(results), min_result)
ax.plot(x_, results, label=name)
ax.legend(loc='best')
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, num_iteration)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
range_result = max_result - min_result
ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2)
ax.set_ylim(ylim)
if ylabel == 'auto':
ylabel = metric
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
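# Hypothetical usage sketch for plot_metric() (illustrative only): assumes the
# lightgbm scikit-learn wrapper and numpy; the split sizes, metric name and
# number of trees are assumptions. Fitting with an eval_set is what fills
# evals_result_, which this function reads.
def _example_plot_metric():
    """Minimal sketch of plotting a validation metric recorded during training."""
    import numpy as np
    from .sklearn import LGBMRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(600, 4)
    y = 2 * X[:, 0] + rng.rand(600)
    reg = LGBMRegressor(n_estimators=30)
    reg.fit(X[:500], y[:500], eval_set=[(X[500:], y[500:])], eval_metric='l2')
    # the single validation set is recorded under the default name 'valid_0'
    return plot_metric(reg, metric='l2')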
def _to_graphviz(
tree_info: Dict[str, Any],
show_info: List[str],
feature_names: Union[List[str], None],
precision: Optional[int] = 3,
orientation: str = 'horizontal',
constraints: Optional[List[int]] = None,
**kwargs: Any
) -> Any:
"""Convert specified tree to graphviz instance.
See:
- https://graphviz.readthedocs.io/en/stable/api.html#digraph
"""
if GRAPHVIZ_INSTALLED:
from graphviz import Digraph
else:
raise ImportError('You must install graphviz and restart your session to plot tree.')
def add(root, total_count, parent=None, decision=None):
"""Recursively add node or edge."""
if 'split_index' in root: # non-leaf
l_dec = 'yes'
r_dec = 'no'
if root['decision_type'] == '<=':
lte_symbol = "≤"
operator = lte_symbol
elif root['decision_type'] == '==':
operator = "="
else:
raise ValueError('Invalid decision type in tree model.')
name = f"split{root['split_index']}"
if feature_names is not None:
label = f"<B>{feature_names[root['split_feature']]}</B> {operator}"
else:
label = f"feature <B>{root['split_feature']}</B> {operator} "
label += f"<B>{_float2str(root['threshold'], precision)}</B>"
for info in ['split_gain', 'internal_value', 'internal_weight', "internal_count", "data_percentage"]:
if info in show_info:
output = info.split('_')[-1]
if info in {'split_gain', 'internal_value', 'internal_weight'}:
label += f"<br/>{_float2str(root[info], precision)} {output}"
elif info == 'internal_count':
label += f"<br/>{output}: {root[info]}"
elif info == "data_percentage":
label += f"<br/>{_float2str(root['internal_count'] / total_count * 100, 2)}% of data"
fillcolor = "white"
style = ""
if constraints:
if constraints[root['split_feature']] == 1:
fillcolor = "#ddffdd" # light green
if constraints[root['split_feature']] == -1:
fillcolor = "#ffdddd" # light red
style = "filled"
label = f"<{label}>"
graph.node(name, label=label, shape="rectangle", style=style, fillcolor=fillcolor)
add(root['left_child'], total_count, name, l_dec)
add(root['right_child'], total_count, name, r_dec)
else: # leaf
name = f"leaf{root['leaf_index']}"
label = f"leaf {root['leaf_index']}: "
label += f"<B>{_float2str(root['leaf_value'], precision)}</B>"
if 'leaf_weight' in show_info:
label += f"<br/>{_float2str(root['leaf_weight'], precision)} weight"
if 'leaf_count' in show_info:
label += f"<br/>count: {root['leaf_count']}"
if "data_percentage" in show_info:
label += f"<br/>{_float2str(root['leaf_count'] / total_count * 100, 2)}% of data"
label = f"<{label}>"
graph.node(name, label=label)
if parent is not None:
graph.edge(parent, name, decision)
graph = Digraph(**kwargs)
rankdir = "LR" if orientation == "horizontal" else "TB"
graph.attr("graph", nodesep="0.05", ranksep="0.3", rankdir=rankdir)
if "internal_count" in tree_info['tree_structure']:
add(tree_info['tree_structure'], tree_info['tree_structure']["internal_count"])
else:
raise Exception("Cannot plot trees with no split")
if constraints:
# "#ddffdd" is light green, "#ffdddd" is light red
legend = """<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD COLSPAN="2"><B>Monotone constraints</B></TD>
</TR>
<TR>
<TD>Increasing</TD>
<TD BGCOLOR="#ddffdd"></TD>
</TR>
<TR>
<TD>Decreasing</TD>
<TD BGCOLOR="#ffdddd"></TD>
</TR>
</TABLE>
>"""
graph.node("legend", label=legend, shape="rectangle", color="white")
return graph
def create_tree_digraph(
booster: Union[Booster, LGBMModel],
tree_index: int = 0,
show_info: Optional[List[str]] = None,
precision: Optional[int] = 3,
orientation: str = 'horizontal',
**kwargs: Any
) -> Any:
"""Create a digraph representation of specified tree.
Each node in the graph represents a node in the tree.
Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means
"this node splits on the feature named "Column_10", with threshold 875.9".
Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a
leaf node, and the predicted value for records that fall into this node
is 0.422". The number (``2``) is an internal unique identifier and doesn't
have any special meaning.
.. note::
For more information please visit
https://graphviz.readthedocs.io/en/stable/api.html#digraph.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance to be converted.
tree_index : int, optional (default=0)
The index of a target tree to convert.
show_info : list of strings or None, optional (default=None)
What information should be shown in nodes.
- ``'split_gain'`` : gain from adding this split to the model
- ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node
- ``'internal_count'`` : number of records from the training data that fall into this non-leaf node
- ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node
- ``'leaf_count'`` : number of records from the training data that fall into this leaf node
- ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node
- ``'data_percentage'`` : percentage of training data that fall into this node
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
orientation : string, optional (default='horizontal')
Orientation of the tree.
Can be 'horizontal' or 'vertical'.
**kwargs
Other parameters passed to ``Digraph`` constructor.
Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.
Returns
-------
graph : graphviz.Digraph
The digraph representation of specified tree.
"""
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
model = booster.dump_model()
tree_infos = model['tree_info']
if 'feature_names' in model:
feature_names = model['feature_names']
else:
feature_names = None
monotone_constraints = model.get('monotone_constraints', None)
if tree_index < len(tree_infos):
tree_info = tree_infos[tree_index]
else:
raise IndexError('tree_index is out of range.')
if show_info is None:
show_info = []
graph = _to_graphviz(tree_info, show_info, feature_names, precision,
orientation, monotone_constraints, **kwargs)
return graph
def plot_tree(
booster: Union[Booster, LGBMModel],
ax=None,
tree_index: int = 0,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
show_info: Optional[List[str]] = None,
precision: Optional[int] = 3,
orientation: str = 'horizontal',
**kwargs: Any
) -> Any:
"""Plot specified tree.
Each node in the graph represents a node in the tree.
Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means
"this node splits on the feature named "Column_10", with threshold 875.9".
Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a
leaf node, and the predicted value for records that fall into this node
is 0.422". The number (``2``) is an internal unique identifier and doesn't
have any special meaning.
.. note::
It is preferable to use ``create_tree_digraph()`` because of its lossless quality
and returned objects can be also rendered and displayed directly inside a Jupyter notebook.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance to be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
tree_index : int, optional (default=0)
The index of a target tree to plot.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
show_info : list of strings or None, optional (default=None)
What information should be shown in nodes.
- ``'split_gain'`` : gain from adding this split to the model
- ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node
- ``'internal_count'`` : number of records from the training data that fall into this non-leaf node
- ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node
- ``'leaf_count'`` : number of records from the training data that fall into this leaf node
- ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node
- ``'data_percentage'`` : percentage of training data that fall into this node
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
orientation : string, optional (default='horizontal')
Orientation of the tree.
Can be 'horizontal' or 'vertical'.
**kwargs
Other parameters passed to ``Digraph`` constructor.
Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.
Returns
-------
ax : matplotlib.axes.Axes
The plot with single tree.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.image as image
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot tree.')
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
graph = create_tree_digraph(booster=booster, tree_index=tree_index,
show_info=show_info, precision=precision,
orientation=orientation, **kwargs)
s = BytesIO()
s.write(graph.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax
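# Hypothetical usage sketch for tree visualization (illustrative only): assumes
# lightgbm, numpy, matplotlib and a working graphviz installation; the data,
# tree index and show_info choices are assumptions.
def _example_plot_tree():
    """Minimal sketch: build a tiny model, then render one of its trees."""
    import numpy as np
    from .sklearn import LGBMRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(500, 4)
    y = 4 * X[:, 0] + rng.rand(500)
    reg = LGBMRegressor(n_estimators=3).fit(X, y)
    # create_tree_digraph() returns the lossless graphviz object;
    # plot_tree() rasterizes the same graph onto a matplotlib axes.
    graph = create_tree_digraph(reg, tree_index=0, show_info=['internal_count', 'leaf_count'])
    ax = plot_tree(reg, tree_index=0, show_info=['data_percentage'])
    return graph, ax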
| mit |
zhenv5/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef (integer division keeps the slice index an int)
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
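# Hypothetical alternative timing sketch (assumption: plain wall-clock timing via
# the standard-library timeit module is acceptable when kernprof/line_profiler is
# not available). It reuses the module-level clf defined above, whose coefficients
# are already sparsified once the script has run, and it is not called automatically.
def _benchmark_with_timeit(n_rounds=1):
    import timeit
    sparse_time = timeit.timeit(benchmark_sparse_predict, number=n_rounds)
    clf.densify()  # convert the coefficients back to a dense ndarray
    dense_time = timeit.timeit(benchmark_dense_predict, number=n_rounds)
    clf.sparsify()  # restore the sparse representation
    print("dense predict: %.3fs, sparse predict: %.3fs" % (dense_time, sparse_time))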
| bsd-3-clause |
chuajiesheng/twitter-sentiment-analysis | analysis/svm_sgd.py | 1 | 3362 | from sklearn.feature_extraction.text import *
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import *
from sklearn.metrics import *
from tokenizers import *
import numpy as np
from pprint import pprint
from time import time
# import dataset
def get_dataset():
files = ['./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt']
x = []
for file in files:
s = []
with open(file, 'r') as f:
for line in f:
s.append(line.strip())
assert len(s) == 1367
x.extend(s)
y = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)
return x, y
tweets, target = get_dataset()
# split train/test 90/10 (test_size=0.1)
X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.1, random_state=1)
print('Train: \t{},{}'.format(len(X_train), y_train.shape))
print('Test: \t{},{}'.format(len(X_test), y_test.shape))
pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3))),
('tfidf', TfidfTransformer(norm='l2', use_idf=True)),
('clf', SGDClassifier(loss='squared_loss', penalty='l2', alpha=1e-04, n_iter=50, random_state=42))])
pipeline = pipeline.fit(X_train, y_train)
# predict
predicted = pipeline.predict(X_test)
print('Accuracy: \t\t{}'.format(accuracy_score(y_test, predicted)))
print('Macro F1: \t\t{}'.format(f1_score(y_test, predicted, average='macro')))
X_ones = np.array(X_test)[y_test == 1]
predicted_positive = pipeline.predict(X_ones)
print('Positive accuracy: \t{}'.format(np.mean(predicted_positive == 1)))
X_ones = np.array(X_test)[y_test == -1]
predicted_negative = pipeline.predict(X_ones)
print('Negative accuracy: \t{}'.format(np.mean(predicted_negative == -1)))
# metrics
predicted = pipeline.predict(X_test)
print(classification_report(y_test, predicted))
print('Confusion matrix: \n{}'.format(confusion_matrix(y_test, predicted)))
# grid search
parameters = {
# 'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 2), (1, 3)), # unigrams or bigrams
# 'vect__tokenizer': (SkipgramTokenizer(3, 2), SkipgramTokenizer(2, 2), None),
# 'tfidf__use_idf': (True, False),
# 'tfidf__norm': ('l1', 'l2'),
'clf__loss': ('squared_loss', 'hinge', 'log', 'epsilon_insensitive'),
'clf__alpha': (0.0001, 0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
'clf__n_iter': (50, 80),
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=8, cv=ShuffleSplit(n_splits=10, test_size=0.2, random_state=10), verbose=1, scoring='accuracy')
print('Performing grid search...')
print('pipeline: {}'.format([name for name, _ in pipeline.steps]))
print('parameters:')
pprint(parameters)
t0 = time()
grid_search.fit(tweets, target)
print("Done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
# Best score: 0.627
# Best parameters set:
# clf__alpha: 0.0001
# clf__loss: 'squared_loss'
# clf__n_iter: 50
# clf__penalty: 'elasticnet'
# vect__ngram_range: (1, 3) | apache-2.0 |
hckr/sig_proc | lab2.py | 1 | 3737 | #!/usr/bin/env python
# encoding: utf-8
# Jakub Młokosiewicz, 2015
from matplotlib import rc
from matplotlib.pyplot import *
from math import pi
from matplotlib.backends.backend_pdf import PdfPages
from mysignalslib import Wave
from waveplothelper import multiplot
rc('figure', figsize=(8.27, 11.7), dpi=100)
rc('savefig', bbox='tight')
rc('legend', fontsize=10, fancybox=True)
rc('text', usetex=True)
rc('text.latex', preamble=r'\usepackage[T1]{polski}')
pdf_pages = PdfPages('pdf/lab2.pdf')
###
figure()
text(0.05, 0.3, u'{\\textsc {\\huge Sprawozdanie z Przetwarzania Sygnałów}} \\\\\\\\\\\\ {\\Large reprezentacja sygnałów w dziedzinie czasu i częstotliwości} \\\\\\\\\\\\\\\\ {\\large Wykonał: Jakub Młokosiewicz}', fontsize=12, ha='left', va='top')
axis('off')
tight_layout()
pdf_pages.savefig()
###
sinus = Wave.sine(amp=5, freq=50, phi=0, autolabel=True)
sinus_spectrum = sinus.amplitude_spectrum()
sinus_spectrum.label = 'Sinus w dziedzinie częstotliwości'
figure()
suptitle(u'Reprezentacja pojedynczej fali sinusoidalnej w dziedzinie czasu i częstotliwości', fontsize=16)
multiplot([sinus], others=[sinus_spectrum], x_range=[0, pi/2])
pdf_pages.savefig()
###
sinus_1 = Wave.sine(amp=2, freq=80, phi=0, autolabel=True)
sinus_2 = Wave.sine(amp=5, freq=120, phi=0, autolabel=True)
sinus_sum = sinus_1 + sinus_2
sinus_sum.label = u'Suma powyższych sygnałów'
sum_spectrum = sinus_sum.amplitude_spectrum()
sum_spectrum.label = u'Widmo częstotliwościowe sumy sygnałów'
figure()
suptitle(u'Suma dwóch fal sinusoidalnych', fontsize=16)
multiplot([sinus_1, sinus_2], [sinus_sum], others=[sum_spectrum], x_range=[0, pi/2])
pdf_pages.savefig()
###
sinus_1 = Wave.sine(amp=10, freq=10, phi=0, autolabel=True)
sinus_2 = Wave.sine(amp=6, freq=50, phi=0, autolabel=True)
sinus_3 = Wave.sine(amp=3, freq=120, phi=pi/2, autolabel=True)
sinus_sum = sinus_1 + sinus_2 + sinus_3
sinus_sum.label = u'Suma powyższych sygnałów'
sum_spectrum = sinus_sum.amplitude_spectrum()
sum_spectrum.label = u'Widmo częstotliwościowe sumy sygnałów'
figure()
suptitle(u'Suma trzech fal sinusoidalnych, w tym jednej przesuniętej w fazie', fontsize=16)
multiplot([sinus_1, sinus_2, sinus_3], [sinus_sum], others=[sum_spectrum], x_range=[0, pi/2])
pdf_pages.savefig()
###
square = Wave.square([0, 1, 0, 1, 0, 1], tick_duration=pi/2, label='[0, 1, 0, 1, 0, 1]')
square_spectrum = square.amplitude_spectrum()
square_spectrum.label = 'Widmo sygnału'
figure()
suptitle(u'Reprezentacja fali prostokątnej w dziedzinie czasu i częstotliwości', fontsize=16)
multiplot([square], others=[square_spectrum])
pdf_pages.savefig()
###
figure()
suptitle(r'\textsc{Wnioski}', fontsize=18)
findings = '''
Do przedstawiania sygnałów w dziedzinie częstotliwości służy transformata DFT lub jej szybszy odpowiednik - FFT. Po odpowiednim wyskalowaniu osi, na wykresie wspomnianej transformaty sygnału na osi OX będziemy mogli odczytać częstotliwość składowej sinusoidalnej sygnału, natomiast na osi OY - amplitudę tej składowej.
Na wykresie sygnału w dziedzinie częstotliwości dla każdej częstotliwości składowej sumy sygnałów możemy zaobserwować "prążek" o wartości równej amplitudzie tej składowej.
Przesunięcie fazowe składowej sygnału nie ma wpływu na widmo amplitudowe uzyskane z transformaty DFT/FFT (nie zaobserwowałem zależności).
Fala prostokątna w dziedzinie częstotliwości objawia się jako suma wielu fal sinusoidalnych.
'''.strip()
text(0, 1, r'\begin{minipage}{7.4 in} \setlength{\parindent}{2em} %s \end{minipage}' % findings.replace('\n', ' \\par '), fontsize=12, va='top')
axis('off')
tight_layout()
subplots_adjust(top=0.95)
pdf_pages.savefig()
###
pdf_pages.close()
# show() | bsd-2-clause |
cyrilbornet/3n-tools | characterStats.py | 1 | 50359 | # coding: utf8
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import division
import sys, glob, os, re
if sys.version_info < (3,0):
reload(sys)
sys.setdefaultencoding('utf8')
import getopt
import math, operator
import scipy, numpy as np
from scipy.interpolate import spline
import copy, collections
import codecs, csv, pickle, json
###
def getScriptPath():
return os.path.dirname(os.path.realpath(sys.argv[0]))
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import warnings
warnings.simplefilter("error")
os.environ["TREETAGGER_HOME"] = getScriptPath()+"/tree-tagger/cmd"
sys.path.append(getScriptPath()+'/treetagger-python')
from treetagger3 import TreeTagger
tt = TreeTagger(encoding='utf-8',language='french')
import urllib, mwclient
import hunspell
################################################################################################################################################################
stopwords = set(line.strip() for line in codecs.open(getScriptPath()+"/classifiersdata/stopwords.txt", 'r', 'utf8') if line!=u'')
stopwords_pnouns = set(line.strip() for line in codecs.open(getScriptPath()+"/classifiersdata/stopwords_pnouns.txt", 'r', 'utf8') if line!=u'')
structuralRules = []
rules_str = [line.strip() for line in codecs.open(getScriptPath()+"/classifiersdata/struct_rules.txt", 'r', 'utf8')]
for r in rules_str:
prediction = r.split(':')[1]
predicate = r.split(':')[0]
pkeybuffer = ['']
p = {int(p.split('=')[0]):p.split('=')[1] for p in predicate.split('&')}
for i in range(4):
if i in p:
nbuffer = []
for idx, pkey in enumerate(pkeybuffer):
for ppart in p[i].split(','):
nbuffer.append(pkey+ppart)
pkeybuffer = nbuffer
else:
for idx, pkey in enumerate(pkeybuffer):
pkeybuffer[idx] = pkey+'...'
for pkey in pkeybuffer:
rule = re.compile(pkey)
structuralRules.append([rule, prediction])
WORD_FREQUENCE_THRESHOLD = 5 # Names mentioned fewer than this many times in the whole book will be ignored (adjusted automatically if dynamicFrequenceFilter = True)
MIN_NOUN_LENGTH = 2 # Nouns shorter than this will be ignored
MINIMAL_MEDIAN_IDX = 1.0 # Names whose median position in sentences is ≤ this value will be ignored
MAX_CHARACTERS_GRAPH = 50 # Absolute max number of characters considered for the final graph
dynamicFrequenceFilter = False
nobliaryParticles = [u'de',u'd',u"d'",u'del',u'dal',u'da',u'di',u'della',u'du',u'des',u'la',u'le',u'of',u'van',u'von',u'vom',u'zu',u'-']
### TOOLS ######################################################################################################################################################
_names = {}
_tagnums = []
compoundNouns = {}
hunspellstemmer = hunspell.HunSpell(getScriptPath()+'/dictionaries/fr-toutesvariantes.dic',getScriptPath()+'/dictionaries/fr-toutesvariantes.aff')
def stem(word):
wstem = hunspellstemmer.stem(word)
if len(wstem)>0: # and wstem[-1] not in stopwords
return unicode(wstem[-1], 'utf8')
else:
return word
def storeCount(array, key):
if key in array:
array[key] += 1
else:
array[key] = 1
def idxForMaxKeyValPair(array):
maxV = array[0][1]
i = 0
maxVIdx = 0
for k,v in array:
if (v > maxV):
maxV = v
maxVIdx = i
i = i+1
return maxVIdx
def keyForMaxValue(_dict):
maxK = ''
maxV = 0
for k,v in _dict.iteritems():
if (v>maxV):
maxV = v
maxK = k
return maxK
def sortUsingList(tosort, reflist):
return [x for (y,x) in sorted(zip(reflist,tosort))]
### BOT 5 ######################################################################################################################################################
onlineDisambiguationClasses = {
"character":["personnage","personnalité","prénom","animal","saint","naissance","décès","peuple","ethni","patronym"],
"place":["lieu","ville","commune","pays","région","territoire","province","toponym","géographi","géolocalisé","maritime"],
"other":["philosophi","divinité","dieu","religion","sigle","code","science","nombre","mathématique"]
}
onlineDisambiguationStopwords = ["wikip","article","littérature","littéraire"] # wikip: We reached a general information page ("Wikipedia category", "Wikipedia disambiguation",...)
cachedResults = {}
def cachedOnlineDisambiguation(site_TODO, term):
if term in cachedResults:
return cachedResults[term]
else:
return False
def onlineDisambiguation(site, term, originalTerm=None, debug=False, iter=1, checkedClasses=[]):
if (debug):
print("***** Online results for "+term+" *****")
if (originalTerm==None):
originalTerm = term
cachedResult = cachedOnlineDisambiguation(site, term)
if (cachedResult!=False and not debug):
return cachedResult
else:
if (site!=False):
if (iter<5):
pages = site.search(compoundNouns[originalTerm])
for pageData in pages:
page = site.Pages[pageData['title']]
foundAtLeastOneCategory = False
needToLookInText = False
categoriesBasedDisambiguation = []
for cat in page.categories():
foundAtLeastOneCategory = True
if (debug):
print(compoundNouns[originalTerm]+" (as "+term+",iter="+str(iter)+")"+"\t"+pageData['title']+"\t"+cat.name)
for k, cls in onlineDisambiguationClasses.iteritems():
for cl in cls:
if 'homonymie' in cat.name.lower():
needToLookInText = True
if cl in cat.name.lower():
categoriesBasedDisambiguation.append([k, 0 if k=='unknown' else 1])
if needToLookInText:
fullText = page.text().lower()
tot_all = 0 # all occurences of all classification words found
fullTextClasses = []
for k, cls in classes_local.iteritems():
tot_cl = 0 # all occurences of the words cls corresponding to class k
for cl in cls:
tot_cl = tot_cl + fullText.count(cl)
fullTextClasses.append([k, tot_cl])
tot_all = tot_all+tot_cl
if (len(fullTextClasses)>0):
maxCountIdx = idxForMaxKeyValPair(fullTextClasses) # Returns key yielding the highest count
confidence = ((1/(iter*(len(checkedClasses)+1)))*(fullTextClasses[maxCountIdx][1]/tot_all) if tot_all>0 else 0)
foundDisambiguation = [fullTextClasses[maxCountIdx][0], confidence]
if (debug):
print(originalTerm+" ("+term+") -- full text disambiguation results: "+"\t"+foundDisambiguation[0]+"\t"+str(foundDisambiguation[1])+"\t"+str(fullTextClasses))
cachedResults[originalTerm] = foundDisambiguation
updateCachedResults(site)
return foundDisambiguation
elif len(categoriesBasedDisambiguation)>0:
bestCat = bestChoice(categoriesBasedDisambiguation, [], debug)
for c in categoriesBasedDisambiguation:
bestCatCount = sum([k[1] for k in categoriesBasedDisambiguation if k[0]==bestCat[0]])
foundDisambiguation = [bestCat[0], bestCatCount/len(categoriesBasedDisambiguation)]
if (bestCatCount==0):
print(originalTerm)
print(term)
print(bestCat[0])
print(str(categoriesBasedDisambiguation))
if (debug):
print(originalTerm+" ("+term+") -- cat based disambiguation results: "+"\t"+foundDisambiguation[0]+"\t"+str(foundDisambiguation[1])+"\t"+str(categoriesBasedDisambiguation))
cachedResults[originalTerm] = foundDisambiguation
updateCachedResults(site)
return foundDisambiguation #+" : "+cat.name
for cat in page.categories():
if (not cat.name in checkedClasses) and len([w for w in onlineDisambiguationStopwords if w in cat.name.lower()])==0:
checkedClasses.append(cat.name)
return onlineDisambiguation(site, cat.name, originalTerm, debug, iter+1, checkedClasses)
elif (debug):
print("Wiki Lookup disabled")
return [u'unknown', 0]
def readCachedResults(site):
if os.path.isfile(getScriptPath()+"/cache/"+site.host+".csv"):
for row in csv.reader(codecs.open(getScriptPath()+"/cache/"+site.host+".csv", 'r', 'utf8')):
cachedResults[row[0]] = [row[1], float(row[2])]
def updateCachedResults(site):
w = csv.writer(codecs.open(getScriptPath()+"/cache/"+site.host+".csv", "w", 'utf8'))
for key, val in cachedResults.items():
w.writerow([key, val[0], val[1]])
### BOT 1 ######################################################################################################################################################
classes_local = {}
for root, dirs, files in os.walk(getScriptPath()+"/classifiersdata/proximitywordclasses"):
for file in files:
if file.endswith(".txt"):
wordsfile = codecs.open(os.path.join(root, file), 'r', 'utf8')
classes_local[file.replace(".txt", "")] = [line.strip() for line in wordsfile if line[0]!=b"#"]
def obviousPredictor(word, indexesOfSentencesContainingWord, sentences, debug=False):
if (debug):
print("***** Obvious results for "+word+" *****")
scores = {}
predictingWords = []
obviousChars = ['m','m.','mr','monsieur','messieurs','mme','mrs','madame','mesdames','miss','mademoiselle','mesdemoiselles','veuf','veuve','docteur','doctoresse','maître','maîtresse','professeur','professeure','duc','duchesse','archiduc','archiduchesse','grand-duc','grande-duchesse','marquis','marquise','comte','comtesse','vicomte','vicomtesse','baron','baronne','seigneur','sieur','dame','écuyeur','messire','sir','lady','lord','émir','émira','chérif','chérifa','cheikh','cheykha','bey','calife','hadjib','nizam','pervane','sultan','vizir','râja','rani','maharadjah','maharajah','maharaja','malik','shah','chah','padishah','khan','altesse','excellence','majesté','dom','don','père','mère','soeur','frère','fils','fille','abbé','curé','révérend','inquisiteur','inquisitrice','évêque','cardinal','monseigneur','messeigneurs','éminence','sainteté','pharaon','despote','magnat','sire','pape','pontife','roi','reine','prince','princesse','empereur','impératrice','infant','kronprinz','kaiser','aspirant','caporal','colonel','commandant','commandante','lieutenant','maréchal','sergent','officier','sous-officier','soldat']
obviousPlaces = ['pays','région','département','ville','village','cité','avenue','allée','boulevard','rue','chemin','quai','cathédrale','abbaye','église','chapelle','mont','colline','forêt','bois','océan','mer','lac','étang']
obviousOthers = ['dieu','déesse','jésus','marie','vierge']
for index in indexesOfSentencesContainingWord:
sentence = sentences[index]
for wIdx, w in enumerate(sentence["words"]):
if (w==word):
w1 = ''
w2 = ''
w3 = ''
w0 = compoundNouns[w].split(' ')[0].lower()
if (wIdx>1):
w1 = sentence['words'][wIdx-1].lower()
if (wIdx>2):
w2 = sentence['words'][wIdx-2].lower()
if (wIdx>3):
w3 = sentence['words'][wIdx-3].lower()
if (w0 in obviousChars) or (w1 in obviousChars) or (w2 in obviousChars and w1 in nobliaryParticles):
predictingWords.append([w0, w1, w2])
storeCount(scores, 'character')
if (w1 in obviousPlaces) or (w2 in obviousPlaces and w1 in ['de','du',"d'"]):
predictingWords.append([w1, w2])
storeCount(scores, 'place')
if (w.lower() in obviousOthers):
predictingWords.append(w)
storeCount(scores, 'other')
if (debug):
print(str(predictingWords)+"\t"+str(scores))
maxV = 0
maxK = u'unknown'
scoresSum = 0
for k,v in scores.iteritems():
scoresSum = scoresSum+max(0, v)
if (v>maxV):
maxV = v
maxK = k
if (scoresSum>(2*len(scores))):
return [maxK, maxV/scoresSum] # we trust the result only if we saw enough samples, that is on average more than two by category
else:
return [u'unknown', 0]
### BOT 2 ######################################################################################################################################################
def positionPredictor(word, indexesOfSentencesContainingWord, sentences, debug=False):
if (debug):
print("***** Position results for "+word+" *****")
positions = []
for index in indexesOfSentencesContainingWord:
sentence = sentences[index]
for wIdx, w in enumerate(sentence["words"]):
if (w == word):
# if (sentence["tags"][wIdx]!='NAM'):
positions.append(float(wIdx)/float(len(sentence["words"])))
meanpos = np.mean(np.array(positions))
if (debug):
print(word+"\tavg(pos)="+str(meanpos)+"\tstd(pos)="+str(np.std(positions))+"\tcount="+str(len(indexesOfSentencesContainingWord)))
return ['place' if (meanpos>0.45) else 'character', abs(0.45 - meanpos)]
### BOT 3 ######################################################################################################################################################
classes_local = {}
for root, dirs, files in os.walk(getScriptPath()+"/classifiersdata/proximitywordclasses"):
for file in files:
if file.endswith(".txt"):
wordsfile = codecs.open(os.path.join(root, file), 'r', 'utf8')
classes_local[file.replace(".txt", "")] = [line.strip() for line in wordsfile if line[0]!=b"#"]
def localProximityPredictor(word, surroundingTerms, debug=False):
if (debug):
print("***** LocalProx results for "+word+" *****")
print(word+" <-> "+", ".join(surroundingTerms.keys()))
class_probas = {}
for possible_class in classes_local:
class_probas[possible_class] = 0
for class_word in classes_local[possible_class]:
if (class_word in surroundingTerms):
class_probas[possible_class] = class_probas[possible_class]+surroundingTerms[class_word]
if (debug):
print(word+"\t"+class_word+" --> "+possible_class+" (x"+str(surroundingTerms[class_word])+")")
numberOfClues = sum(class_probas.values())
maxProba = 0
confidence = 0
maxProbaClass = u"unknown"
if (numberOfClues>2):
for possible_class in class_probas:
if class_probas[possible_class]>maxProba:
maxProba = class_probas[possible_class]
confidence = float(maxProba)/float(numberOfClues)
maxProbaClass = possible_class
if (debug):
print(word+"\t"+maxProbaClass+"\t"+str(confidence))
return [maxProbaClass, confidence]
#### BOT 4 #####################################################################################################################################################
#def tag2num(tag):
# if tag in _tagnums:
# return _tagnums.index(tag)
# else:
# _tagnums.append(tag)
# return tag2num(tag)
def getSurroundings(array, idx):
surroundings = []
surroundings.append(array[idx-2] if (idx>1) else '---')
if (idx>0):
surroundings.append(array[idx-1])
else:
surroundings.append('---')
if (idx<len(array)-1):
surroundings.append(array[idx+1])
else:
surroundings.append('---')
if (idx<len(array)-2):
surroundings.append(array[idx+2])
else:
surroundings.append('---')
return surroundings
def structuralPredictor(word, indexesOfSentencesContainingWord, sentences, debug=False):
if (debug):
print("***** Structural results for "+word+" *****")
scores = {u"place":0,u"character":0,u"other":0,u"unknown":0}
place_vs_char = 0.0 # Prediction score variable. If the result turns out negative, we assume a place; if positive, a character.
noise_score = 0.0 # Noise score. If positive, discard the result
positions = []
for index in indexesOfSentencesContainingWord:
sentence = sentences[index]
for wIdx, w in enumerate(sentence["words"]):
if (w == word):
if ("VER:" in sentence["tags"][wIdx]):
scores[u"unknown"] = scores[u"unknown"] + 1.0 # if the word itself is tagged as a verb, we get highly suspicious…
else:
surroundings = [tag.split(':')[0] for tag in getSurroundings(sentence["tags"], wIdx)]
if (debug):
print(word+" ["+sentence["tags"][wIdx]+"],"+",".join(surroundings))
if ("VER" == surroundings[2]):
scores[u"character"] = scores[u"character"] + 2.0
elif ("VER" in surroundings):
scores[u"character"] = scores[u"character"] + 0.5
if ("NAM" == surroundings[2]):
scores[u"character"] = scores[u"character"] + 1.0
if (surroundings[0]=="PRP" or surroundings[1]=="PRP"):
scores[u"place"] = scores[u"place"] + 1.0
if ("VER" == surroundings[1]):
scores[u"place"] = scores[u"place"] + 0.5
if (surroundings[1]=="DET"):
scores[u"place"] = scores[u"place"] + 0.5
pass
if (surroundings[1]=="PRP" and surroundings[2]=="---"):
scores[u"other"] = scores[u"other"] + 1.0
if (surroundings[1]=="PUN"): # noise detection (wrongly tokenized sentences).
scores[u"unknown"] = scores[u"unknown"] + 1.0
else:
scores[u"unknown"] = scores[u"unknown"] - 1.0
if (surroundings[0]=="---" and surroundings[1]=="---"): # noise detection (wrongly tokenized sentences). If this happens, needs to be compensated 2 times
scores[u"unknown"] = scores[u"unknown"] + 2.0
else:
scores[u"unknown"] = scores[u"unknown"] - 1.0
if (debug):
print(' --> '+str(scores))
maxV = 0
maxK = u'unknown'
scoresSum = 0
for k,v in scores.iteritems():
scoresSum = scoresSum+max(0, v)
if (v>maxV):
maxV = v
maxK = k
return [maxK, maxV/scoresSum if scoresSum>0 else 0]
#### BOT 6 #####################################################################################################################################################
'''
def returnNamesFromSynsets(synsets_list):
names = []
for h in synsets_list:
lemmas = h.lemmas()
for l in lemmas:
names.append(l.name())
return names
def allHypernyms(synsets_list):
hypernyms = []
for synset in synsets_list:
synset_hypernyms = synset.hypernyms()
hypernyms = hypernyms+returnNamesFromSynsets(synset_hypernyms)
subs = allHypernyms(synset_hypernyms)
for h in subs:
hypernyms = hypernyms+returnNamesFromSynsets(synset.hypernyms())
return hypernyms
def verbIsAboutSpeech(w):
vstemmed = stem(w)
hypernyms = allHypernyms(wn.synsets(vstemmed, lang='fra', pos=wn.VERB))
return ('verbalise' in hypernyms or 'communicate' in hypernyms or 'breathe' in hypernyms) # Glass & Bangay
'''
def getQuotesPredictorThreshold(words, wsent, sentences, debug):
speakMentionsRatios = []
for w in words:
quotesCount = 0;
for index in wsent[w]:
if ("PUN:cit" in sentences[index]["tags"]):
quotesCount = quotesCount+1
speakMentionsRatios.append(quotesCount/len(wsent[w]))
ratio = np.mean(speakMentionsRatios)
if (debug):
print("***********************************************************")
print("quotesPredictorThreshold = "+str(ratio))
print("***********************************************************")
return ratio
def quotesPredictor(word, indexesOfSentencesContainingWord, sentences, quotesPredictorThreshold, debug=False):
if (debug):
print("***** Quotes/Mentions results for "+word+" *****")
quotesCount = 0
for index in indexesOfSentencesContainingWord:
s = sentences[index]
if ("PUN:cit" in s["tags"]):
quotesCount = quotesCount+1
if (quotesCount>0):
score = quotesCount/len(indexesOfSentencesContainingWord)
if (debug):
print("Quotes="+str(quotesCount)+" / Mentions="+str(len(indexesOfSentencesContainingWord))+" / Score="+str(score));
if (score>=quotesPredictorThreshold):
return ["character", pow((score-quotesPredictorThreshold)/(1-quotesPredictorThreshold), 2)]
else:
return ["place", pow((quotesPredictorThreshold-score)/(quotesPredictorThreshold), 2)]
else:
return ["place", 0.9]
'''
#Variant 1
if (debug):
print("***** Quotes results for "+word+" *****")
distances = []
quotesCount = 0
for index in indexesOfSentencesContainingWord:
sentenceprev = sentences[index-1] if (index>0) else sentences[index]
sentencenext = sentences[index+1] if (index<len(sentences)-1) else sentences[len(sentences)]
sentence = sentences[index]
if ("PUN:cit" in sentence["tags"]):
diff = sentence["tags"].index("PUN:cit") - sentence["words"].index(word) # We look for citations openings AFTER the word (since we may not have the citation end mark in the case of "--" notations)
if (diff>0):
for wIdx, w in enumerate(sentence["tags"]):
if ("VER" in w and verbIsAboutSpeech(sentence["words"][wIdx])):
print sentence["words"][wIdx]+" :: "+str(0) # wnapp.get_relations_data(word, verb_synsets[0]) quotesCount = quotesCount+1
distances.append(diff)
if ("PUN:cit" in sentenceprev["tags"] or "PUN:cit" in sentencenext["tags"]):
quotesCount = quotesCount+1
# diff = sentenceprev["tags"].index("PUN:cit") - sentence["words"].index(word)
if (debug):
print("Quotes="+str(quotesCount)+",\t"+str(distances));
if (quotesCount>0):
score = sum(distances)/quotesCount
return ["character", score]
if (score>=0.01):
return ["character", score]
else:
return ["place", 1-(score*10)]
else:
return ["place", 0.9]
'''
### ######################################################################################################################################################
def tokenizeAndStructure(text):
taggedText = tt.tag(text)
tagstats = {}
chaps = collections.OrderedDict()
cnum = ''
chapter_sentences_idx = []
allsentences = []
sent_words = []
sent_tags = []
for tag in taggedText:
if ("_CHAP_" in tag[0]):
if (cnum!=''):
chaps[cnum] = chapter_sentences_idx
chapter_sentences_idx = []
cnum = tag[0][6:]
elif (tag[1]==u"SENT"):
nostop = [w for w in sent_words if w not in stopwords]
sent = {u"words":sent_words,u"tags":sent_tags,u"nostop":nostop}
chapter_sentences_idx.append(len(allsentences))
allsentences.append(sent)
sent_words = []
sent_tags = []
else:
sent_words.append(tag[0])
sent_tags.append(tag[1])
return [chaps, allsentences]
################################################################################################################################################################
def bestChoice(_predictions, weights = [], debug=False):
predictions = copy.deepcopy(_predictions)
if len(weights)==0:
weights = [1 for p in predictions]
if (debug):
print(" - Predictions: "+str(predictions))
zeroProbas = []
duplicates = []
for idx, p in enumerate(predictions):
# Check probabilities, remove predictions with p=0
if p is None or len(p)!=2:
print("prediction "+str(idx)+" invalid")
print(" (len="+str(len(p))+"): ["+",".join(p)+"]")
exit()
elif p[1]==0:
zeroProbas.append(idx)
# Apply weighting
elif (weights[idx]==0):
zeroProbas.append(idx)
elif (weights[idx]>1) and not p[1]==0:
for n in range(1, weights[idx]):
duplicates.append(p)
for p in duplicates:
predictions.append(p)
zeroProbas.sort(reverse=True)
for pIdx in zeroProbas:
del predictions[pIdx] # Remove predictions with probability 0
if (len(predictions)>0):
maxProbaIdx = idxForMaxKeyValPair(predictions) # Returns key yielding the highest probabilities
else:
return ['unknown', 0]
if len(predictions)==0:
return copy.deepcopy(_predictions[0]) # in case all the entries were removed, we return a copy of the former first item for compliance
allAgree = True
agreeOnClass = predictions[0][0]
for p in predictions:
if (p[0]!=agreeOnClass):
allAgree = False
if (allAgree):
return predictions[maxProbaIdx] # here we could also return [agreeOnClass, 1]
else:
predClasses = {}
for prediction in predictions:
storeCount(predClasses, prediction[0])
if (len(predClasses)==len(predictions)): # we have exactly as many classes as predictions (i.e. each predictor said something different)
return predictions[maxProbaIdx]
else:
mostRepresentedClassesCount = predClasses[max(predClasses.iteritems(), key=operator.itemgetter(1))[0]]
for pred in predClasses.keys():
if predClasses[pred]<mostRepresentedClassesCount:
del predClasses[pred]
validPredictions = [p for p in predictions if p[0] in predClasses.keys()]
return validPredictions[idxForMaxKeyValPair(validPredictions)]
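# Illustrative sketch of bestChoice() with hypothetical predictor outputs (these
# values are assumptions, not taken from a real run), using the default equal weights:
#
#   bestChoice([['character', 0.6], ['place', 0.4], ['character', 0.2]])
#   # -> ['character', 0.6]  (the majority class is kept and its highest-probability entry is returned)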
def detect_ucwords(fulltext, sentences, debug=False):
_ucwords = {}
# Get all the uppercase words that are not leading sentences
for sent in sentences:
s = sent[u"nostop"]
if (len(s)>1):
grams5 = zip(s[1:-4], s[2:-3], s[3:-2], s[4:-1], s[5:])
grams3 = zip(s[1:-2], s[2:-1], s[3:])
grams2 = zip(s[1:-1], s[2:])
grams1 = zip(s[1:])
sentUCWords = []
for gram in grams5:
if (gram[0][0].isupper() and (gram[1] in [u'-', u"'"]) and (gram[3] in [u'-', u"'"])):
sentUCWords.append(gram)
for gram in grams3:
if (gram[0][0].isupper() and gram[2][0].isupper()):
if (gram[1] in nobliaryParticles):
sentUCWords.append(gram)
elif (gram[1] in [u"'"]):
sentUCWords.append(gram)
elif (gram[1][0].isupper()):
sentUCWords.append(gram)
for gram in grams2:
if (gram[0][0].isupper() and gram[1][0].isupper()):
sentUCWords.append(gram)
sentUCWords_flat = [w for _tuple in sentUCWords for w in _tuple]
for gram in grams1:
if (gram[0][0].isupper() and not (gram[0] in sentUCWords_flat)):
sentUCWords.append(gram)
for gram in sentUCWords:
gramStrRepresentation = u" ".join(gram).replace(u"' ", u"'")
storeCount(_ucwords, gramStrRepresentation)
if (debug):
print("***** UC Words found *****")
print(", ".join(_ucwords.keys()))
print("**************************")
return _ucwords
################################################################################################################################################################
def getUseStats(word, ucwords, chapters, sentences, wprev, wnext, wsent):
if len(wsent[word])>0:
chaptersCovering = []
frequenciesDiff = []
chapterStart = [i for i in range(0,len(chapters)) if wsent[word][0] in chapters[chapters.keys()[i]]][0]
chapterEnd = [i for i in range(0,len(chapters)) if wsent[word][-1] in chapters[chapters.keys()[i]]][0]
for c, csidx in chapters.iteritems():
intersect = [i for i in csidx if i in wsent[word]]
chaptersCovering.append(len(intersect))
expectedPerc = (len(csidx)/len(sentences))
observedPerc = (len(intersect)/ucwords[word])
frequenciesDiff.append(abs(expectedPerc-observedPerc))
return {
'firstsent':wsent[word][0],
'lastsent':wsent[word][-1],
'coverage':(wsent[word][-1]-wsent[word][0])/len(sentences),
'chapters':chaptersCovering,
'chapterStart':chapterStart,
'chapterEnd':chapterEnd,
'dp': sum(frequenciesDiff)/2
}
else:
return {}
def getMainCharacters(ucwords, sentences, wprev, wnext, wsent):
return ucwords
def sortbydescwordlengths(a,b):
return len(b) - len(a)
def joinCompoundNouns(fulltext, ucwords):
allucwords = copy.deepcopy(ucwords.keys())
allucwords.sort(sortbydescwordlengths)
for w in allucwords:
if (u" " in w) or (u"'" in w):
wjoined = w.replace(u" ", u"").replace(u".", u"").replace(u"'", u"").encode("utf-8")
if (w.endswith("'")):
wjoined = wjoined+u"'"
fulltext = fulltext.replace(w, wjoined)
compoundNouns[wjoined] = w
else:
compoundNouns[w] = w
return fulltext
def confirmProperNoun(word, wmedianidx, wsentences, ucwords):
if (len(word) < MIN_NOUN_LENGTH) or (word.endswith("'") and len(word) < MIN_NOUN_LENGTH+1):
if debug:
print("Word ignored: "+word+" [len<"+str(MIN_NOUN_LENGTH)+"]")
return False
if (word.lower() in stopwords):
if debug:
print("Word ignored: "+word+" [in general stopwords"+"]")
return False
if (word in stopwords_pnouns):
if debug:
print("Word ignored: "+word+" [in proper nouns stopwords"+"]")
return False
if (wmedianidx<=MINIMAL_MEDIAN_IDX):
if debug:
print("Word ignored: "+word+" [median idx="+str(wmedianidx)+"]")
return False
wordTags = []
for s in wsentences:
wordTags.append(s['tags'][s['words'].index(word)])
# for i, w in enumerate(s['words']):
# if w==word:
# wordTags.append(s['tags'][i])
if not ('NAM' in wordTags or 'NOM' in wordTags):
if debug:
print("Word ignored: "+word+" [tagged "+str(wordTags)+"]")
return False
return True
def getIdxOfWord(ws, w):
try:
wIdx = ws.index(w)
    except ValueError:  # w is not present in ws
wIdx = -1
return wIdx
def removeFalsePositives(sentences, wmedianidx, wprev, wnext, wsent, ucwords):
for word, medianidx in wmedianidx.iteritems():
proxWords = {}
for w in [w for _sub in [wprev[word].keys(), wnext[word].keys()] for w in _sub]:
storeCount(proxWords, w)
rejected = False
if (not confirmProperNoun(word, medianidx, [sentences[i] for i in wsent[word]], ucwords)):
rejected = True
if (word.endswith('s') and word[:-1] in ucwords):
rejected = True
if debug:
print("Word ignored: "+word+" supposed plural form of "+word[:-1])
if (rejected):
del ucwords[word]
del wprev[word]
del wnext[word]
del wsent[word]
def getNounsSurroundings(sentences, ucwords, fulltext):
wprev = {}
wnext = {}
wsent = {}
wmeanidx = {}
wmedidx = {}
allucwords = ucwords.keys()
for word in allucwords:
wprev[word] = {}
wnext[word] = {}
wsent[word] = []
wPositions = []
i = 0.0
for sentIdx, sent in enumerate(sentences):
wpos = getIdxOfWord(sent["nostop"], word)
if (wpos > -1):
wsent[word].append(sentIdx)
wPositions.append(wpos)
if wpos>0:
storeCount(wprev[word], stem(sent["nostop"][wpos-1]))
if wpos<len(sent["nostop"])-1:
storeCount(wnext[word], stem(sent["nostop"][wpos+1]))
i = i+1.0
if (len(wPositions)>0):
wmeanidx[word] = np.mean(np.array(wPositions))
wmedidx[word] = np.median(np.array(wPositions))
else:
wmeanidx[word] = 0
wmedidx[word] = 0
return [wprev, wnext, wsent, wmeanidx, wmedidx]
def removeBelowThreshold(sentences, wmeanidx, wprev, wnext, wsent, ucwords):
allucwords = ucwords.keys()
for word in allucwords:
if (len(wsent[word])>=WORD_FREQUENCE_THRESHOLD):
ucwords[word] = len(wsent[word])
else:
del ucwords[word]
del wprev[word]
del wnext[word]
del wsent[word]
del wmeanidx[word]
################################################################################################################################################################
def processBook(bookfile, mwsite, focus, benchmark, debug=False, verbose=False, graphs=False):
jsonOut = {}
ucwords = {}
sentences = []
benchmarkValues = {"found":0,"correct":0,"predictors":[[],[],[],[],[],[],[],[],[]]}
finalWordClasses = {'character':[],'place':[]}
allpredictions = {}
with codecs.open(bookfile, 'r', 'utf8') as f:
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
chapters_lines_buff = []
for i, raw_line in enumerate(f):
line_split = raw_line.split(u"\t")
chapter_number = line_split[0] # First component is treated as chapter number
line = line_split[-1] # Last component is the actual text (TODO: handle possible title in the middle, if set)
line = line.replace(u"’", u"'").replace(u"«", u" « ").replace(u"»", u" » ").replace(u"--", u" « ").replace(u"_", u" ").strip() #.replace(u"-", u" ")
chapters_lines_buff.append(u'. _CHAP_'+chapter_number+u'. '+line)
fulltext = u" ".join(chapters_lines_buff)
if (dynamicFrequenceFilter):
global WORD_FREQUENCE_THRESHOLD
allwords = len(re.findall(r'\w+', fulltext))
# WORD_FREQUENCE_THRESHOLD = round(6+((math.log(math.log(allwords))*allwords)/10000)/5)
WORD_FREQUENCE_THRESHOLD = round(6+(allwords/10000)/4)
[chapters, sentences] = tokenizeAndStructure(fulltext)
if (focus==''):
ucwords = detect_ucwords(fulltext, sentences, debug)
fulltext = joinCompoundNouns(fulltext, ucwords)
[chapters, sentences] = tokenizeAndStructure(fulltext)
ucwords = detect_ucwords(fulltext, sentences, debug)
else:
ucwords = {}
focusWords = focus.split(u",")
for w in focusWords:
ucwords[w] = WORD_FREQUENCE_THRESHOLD
compoundNouns[w] = w
[wprev, wnext, wsent, wmeanidx, wmedidx] = getNounsSurroundings(sentences, ucwords, fulltext)
removeFalsePositives(sentences, wmedidx, wprev, wnext, wsent, ucwords)
ucwtotcount = sum(ucwords.values())
ucwtotunique = len(ucwords)
removeBelowThreshold(sentences, wmeanidx, wprev, wnext, wsent, ucwords)
quotesPredictorThreshold = getQuotesPredictorThreshold(ucwords, wsent, sentences, debug)
sorted_ucw = sorted(ucwords.items(), key=operator.itemgetter(1))
sorted_ucw.reverse()
weights = [3, 1, 1, 1, 1]
if (mwsite!=False):
weights.append(1)
for word, wcount in sorted_ucw:
if not word in compoundNouns:
compoundNouns[word] = word
proxWords = {}
for w in [w for _sub in [wprev[word].keys(), wnext[word].keys()] for w in _sub]:
storeCount(proxWords, w)
allpredictions[word] = [
obviousPredictor(word, wsent[word], sentences, debug),
positionPredictor(word, wsent[word], sentences, debug),
localProximityPredictor(word, proxWords, debug),
structuralPredictor(word, wsent[word], sentences, debug),
# structuralPredictor2(word, wsent[word], sentences, debug),
quotesPredictor(word, wsent[word], sentences, quotesPredictorThreshold, debug)
]
if (mwsite!=False):
allpredictions[word].append(onlineDisambiguation(mwsite, word, word, debug))
if (len(allpredictions[word])!=len(weights)):
print('ERROR: Weights and predictors mismatch.')
exit()
if (debug):
print('-----------------------------------')
# Tweak weights according to allpredictions results. For instance, remove predictors whose % deviate too much from the others
# charsPlacesRatio = []
# predictorRatioCounts = []
#
# for pIdx in range(0,len(weights)):
# charsPlacesRatio.append((len([1 for wp in allpredictions if allpredictions[wp][pIdx][0]=='character']))/(len([1 for wp in allpredictions if allpredictions[wp][pIdx][0]=='place'])+1))
# median = np.median(np.array(charsPlacesRatio))
# MAD = np.median([abs(r - median) for r in charsPlacesRatio])
# for rIdx, r in enumerate(charsPlacesRatio):
# if (debug):
# print(str(rIdx)+":"+str(r))
# if (abs(r - median) > 1.4826*MAD):
# weights[rIdx] = 0
# pass
# if (debug):
# print('Adjusted predictors weights: '+str(weights))
if (saveResults):
with codecs.open(getScriptPath()+u"/cache/results-"+bookfile.split(u"/")[-1], 'wb', 'utf8') as f:
pickle.dump(allpredictions, f)
for word, wcount in sorted_ucw:
if (debug): print(word)
best = bestChoice(allpredictions[word], weights, debug)
if (debug): print(' --> '+best[0])
if (best[0] in finalWordClasses.keys()):
finalWordClasses[best[0]].append(word)
if len(benchmark)>0:
if (word in benchmark.keys()):
benchmarkValues["found"] = benchmarkValues["found"]+1
if (benchmark[word] == best[0]):
benchmarkValues["correct"] = benchmarkValues["correct"]+1
for idx, p in enumerate(allpredictions[word]):
benchmarkValues["predictors"][idx].append(1 if p[0]==benchmark[word] else 0)
if verbose:
print(word+"\t"+best[0]+"\t"+str(benchmark[word] == best[0])+"\t"+str(allpredictions[word]))
else:
if verbose:
print(word+"\t"+best[0]+"\tN/A\t"+str(allpredictions[word]))
else:
if verbose:
print(word+"\t"+best[0]+"\t"+str(best[1])+"\t"+str(wcount))
if (debug):
print('===================================')
# if wcount>(ucwtotcount/500):
# print("OK: \t"+word+"\t"+str(meanidx)+"\t"+str(ucwords[word])+"\t"+localProximityPredictor(word, proxWords)+"\ts="+"\t"+','.join(proxWords))
# elif debug:
# print(word+"\t"+"(ignored, "+str(wcount)+"/"+str(ucwtotcount)+")")
if len(benchmark)>0:
if verbose:
print('=== PERFORMANCE EVALUATION ==============================')
ncat = 0
unknown_words = []
correct_predictors = {}
ref_count = {} # reference (number of words that should fall in each category, by predictor; last idx=best choice)
attr_count = {} # attributions (number of words that fell in each category, by predictor; last idx=best choice)
for cat in ['character','place']:
ncat = ncat+1
correct_predictors[cat] = {}
attr_count[cat] = {}
ref_count[cat] = 0
for pred_idx in range(0,len(weights)+1):
correct_predictors[cat][pred_idx] = []
attr_count[cat][pred_idx] = []
for word, word_predictions in allpredictions.iteritems():
if word in benchmark.keys():
if (benchmark[word]==cat): # we only consider the words from this effective category
ref_count[cat] = ref_count[cat]+1
for pred_idx, prediction in enumerate(word_predictions):
correct_predictors[cat][pred_idx].append(1 if (prediction[0]==cat) else 0)
correct_predictors[cat][pred_idx+1].append(1 if (cat in finalWordClasses and word in finalWordClasses[cat]) else 0)
else:
unknown_words.append(word) # we store away words that are not listed in the benchmark file
for pred_idx, prediction in enumerate(word_predictions):
attr_count[cat][pred_idx].append(1 if prediction[0]==cat else 0)
attr_count[cat][pred_idx+1].append(1 if (cat in finalWordClasses and word in finalWordClasses[cat]) else 0)
precision_by_classes = {}
recall_by_classes = {}
for pred_idx in range(0,len(weights)+1):
precision_by_classes[pred_idx] = []
recall_by_classes[pred_idx] = []
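        # Rough reading of the metrics computed below (comment added for clarity):
        # for each predictor index (the last index being the combined best choice),
        #   precision ~= benchmark words of a class correctly predicted as that
        #                class / all words attributed to that class, and
        #   recall    ~= benchmark words of a class correctly predicted as that
        #                class / number of benchmark words in that class.
        # Hypothetical example: 10 true 'character' words, 8 attributions to
        # 'character', 6 of them correct -> precision 0.75, recall 0.6.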
for cat, cat_count in ref_count.iteritems():
for idx, pred_correct in correct_predictors[cat].iteritems():
precision_by_classes[idx].append((sum(pred_correct)/sum(attr_count[cat][idx]) if sum(attr_count[cat][idx])>0 else 1))
recall_by_classes[idx].append((sum(pred_correct)/cat_count if cat_count>0 else 0))
missing_words = list(set(benchmark.keys()) - set([w for ws in finalWordClasses.values() for w in ws]))
if (verbose):
if (len(unknown_words)>0):
print("! UNKNOWN WORDS: "+(", ".join(set(unknown_words))))
if (len(missing_words)>0):
print("! MISSING WORDS: "+(", ".join(missing_words)))
for idx in precision_by_classes.keys():
print(str(idx)+"\t"+"P="+str(sum(precision_by_classes[idx])/ncat)+"\t"+"R="+str(sum(recall_by_classes[idx])/ncat))
print('===========================================================')
sortKeys = []
for v in finalWordClasses['character']:
sortKeys.append(ucwords[v])
finalWordClasses['character'] = sortUsingList(finalWordClasses['character'], sortKeys)
sortKeys = []
for v in finalWordClasses['place']:
sortKeys.append(min(wsent[v]))
finalWordClasses['place'] = sortUsingList(finalWordClasses['place'], sortKeys)
if api:
jsonOut['substitutions'] = compoundNouns
jsonOut['classes'] = finalWordClasses
if verbose:
print('Total characters occurences: '+str(sum([ucwords[x] for x in finalWordClasses['character']])))
print('Total places occurences: '+str(sum([ucwords[x] for x in finalWordClasses['place']])))
if (mwsite!=False):
updateCachedResults(mwsite)
if len(benchmark)>0:
if (benchmarkValues["found"]>0):
if verbose:
print("========== BENCHMARK RESULTS ============")
print("Overall score: "+str(benchmarkValues["correct"]/benchmarkValues["found"]))
# for idx, b in enumerate([b for b in benchmarkValues["predictors"] if len(b)>0]):
# print("Prediction #"+str(idx+1)+": "+str( (sum(b)/len(b))))
# These are the colors that will be used in the plot
# color_sequence = ['#5EF1F2', '#00998F', '#E0FF66', '#740AFF', '#990000', '#FFFF80', '#FFFF00', '#FF5005', '#94FFB5', '#8F7C00', '#9DCC00', '#C20088', '#003380', '#FFA405', '#FFA8BB', '#426600', '#FF0010', '#F0A3FF', '#0075DC', '#993F00', '#4C005C', '#191919', '#005C31', '#2BCE48', '#FFCC99', '#808080']
# color_sequence = ['#1f77b4', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
color_sequence = ["#000000", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#FFFF00", "#3B5DFF", "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C", "#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800", "#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51", "#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94", "#7ED379", "#012C58", "#7A7BFF", "#D68E01", "#353339", "#78AFA1", "#FEB2C6", "#75797C", "#837393", "#943A4D", "#B5F4FF", "#D2DCD5", "#9556BD", "#6A714A", "#001325", "#02525F", "#0AA3F7", "#E98176", "#DBD5DD", "#5EBCD1", "#3D4F44", "#7E6405", "#02684E", "#962B75", "#8D8546", "#9695C5", "#E773CE", "#D86A78", "#3E89BE", "#CA834E", "#518A87", "#5B113C", "#55813B", "#E704C4", "#00005F", "#A97399", "#4B8160", "#59738A", "#FF5DA7", "#F7C9BF", "#643127", "#513A01", "#6B94AA", "#51A058", "#A45B02", "#1D1702", "#E20027", "#E7AB63", "#4C6001", "#9C6966", "#64547B", "#97979E", "#006A66", "#391406", "#F4D749", "#0045D2", "#006C31", "#DDB6D0", "#7C6571", "#9FB2A4", "#00D891", "#15A08A", "#BC65E9", "#FFFFFE", "#C6DC99", "#203B3C", "#671190", "#6B3A64", "#F5E1FF", "#FFA0F2", "#CCAA35", "#374527", "#8BB400", "#797868", "#C6005A", "#3B000A", "#C86240", "#29607C", "#402334", "#7D5A44", "#CCB87C", "#B88183", "#AA5199", "#B5D6C3", "#A38469", "#9F94F0", "#A74571", "#B894A6", "#71BB8C", "#00B433", "#789EC9", "#6D80BA", "#953F00", "#5EFF03", "#E4FFFC", "#1BE177", "#BCB1E5", "#76912F", "#003109", "#0060CD", "#D20096", "#895563", "#29201D", "#5B3213", "#A76F42", "#89412E", "#1A3A2A", "#494B5A", "#A88C85", "#F4ABAA", "#A3F3AB", "#00C6C8", "#EA8B66", "#958A9F", "#BDC9D2", "#9FA064", "#BE4700", "#658188", "#83A485", "#453C23", "#47675D", "#3A3F00", "#061203", "#DFFB71", "#868E7E", "#98D058", "#6C8F7D", "#D7BFC2", "#3C3E6E", "#D83D66", "#2F5D9B", "#6C5E46", "#D25B88", "#5B656C", "#00B57F", "#545C46", "#866097", "#365D25", "#252F99", "#00CCFF", "#674E60", "#FC009C", "#92896B"] # http://godsnotwheregodsnot.blogspot.ru/
nbCharacters = len(finalWordClasses['character'])
if graphs:
if (nbCharacters>0):
if (nbCharacters>MAX_CHARACTERS_GRAPH):
finalWordClasses['character'] = [w[0] for w in sorted_ucw if w[0] in finalWordClasses['character']][0:MAX_CHARACTERS_GRAPH]
chaptersPlaces = {}
for cnum, chapsentencesidx in chapters.iteritems():
chapterPlaces = {}
for w2idx, w2 in enumerate(finalWordClasses['place']):
chapterPlaces[w2] = [y for z in [sentences[idx]['words'] for idx in chapsentencesidx] for y in z].count(w2)
chapterPlace = keyForMaxValue(chapterPlaces)
chaptersPlaces[cnum] = (finalWordClasses['place'].index(chapterPlace) if chapterPlace!='' else -1)
eventGraph = {}
if (not api):
fig, ax = plt.subplots(1, 1, figsize=(18, 10))
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.xticks(range(0, len(chapters)*nbCharacters, nbCharacters), chapters.keys(), fontsize=10, rotation=90)
plt.yticks(range(0, len(finalWordClasses['place']), 1), finalWordClasses['place'], fontsize=10)
for w1idx, w1 in enumerate(finalWordClasses['character']):
xs = []
ys = []
cidx = 0
for cnum, chapsentencesidx in chapters.iteritems():
if (chaptersPlaces[cnum]!=-1):
intersect = list(set(wsent[w1]) & set(chapsentencesidx))
if len(intersect)>0:
xs.append(cidx*nbCharacters+w1idx)
ys.append(chaptersPlaces[cnum])
cidx = cidx+1
                    # if the considered character appears (with a known place) in more than one chapter, we plot its trajectory
if (len(xs)>1):
xs_sorted, ys_sorted = zip(*sorted(zip(xs, ys), key=operator.itemgetter(0), reverse=False))
plt.plot(xs_sorted, ys_sorted, 'o-', lw=2, color=color_sequence[w1idx % len(color_sequence)], label=w1, markersize=8, markeredgewidth=0.0, alpha=0.7)
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=10)
plt.show()
else:
eventGraph['chapters'] = chapters.keys()
eventGraph['places'] = finalWordClasses['place']
eventGraph['characters'] = {}
for w1idx, w1 in enumerate(finalWordClasses['character']):
xs = []
ys = []
cidx = 0
for cnum, chapsentencesidx in chapters.iteritems():
if (chaptersPlaces[cnum]!=-1):
intersect = list(set(wsent[w1]) & set(chapsentencesidx))
if len(intersect)>0:
xs.append(cidx)
ys.append(chaptersPlaces[cnum])
cidx = cidx+1
eventGraph['characters'][w1] = zip(*sorted(zip(xs, ys), key=operator.itemgetter(0), reverse=False))
jsonOut['eventGraph'] = eventGraph
intersects = []
for w1 in finalWordClasses['character']:
for w2 in [w for w in finalWordClasses['character'] if w!=w1]:
intersect = list(set(wsent[w1]) & set(wsent[w2]))
if (len(intersect)>0):
intersects.append([w1, w2, len(intersect)])
if (api):
jsonOut['charsGraph'] = intersects
else:
print("__________ Characters graph ______________")
print("graph characters {")
print(" "+"graph[layout=neato, splines=true, overlap=prism];")
for i in intersects:
print(" "+i[0]+" -- "+i[1]+" [len="+str(1+1/i[2])+", penwidth="+str(math.sqrt(i[2]))+"];") #weight="+str(len(intersect))+",
print("}")
bipRelations = {}
for w1 in finalWordClasses['character']:
bipRelations[w1] = {}
for cnum, chapsentencesidx in chapters.iteritems():
if (chaptersPlaces[cnum]!=-1):
if len(list(set(wsent[w1]) & set(chapsentencesidx)))>0:
storeCount(bipRelations[w1], finalWordClasses['place'][chaptersPlaces[cnum]])
if (api):
jsonOut['bipGraph'] = bipRelations
else:
print("__________ Bipartite graph ______________")
print("graph bip {")
print(" "+"graph[layout=neato, splines=true, overlap=prism];")
print(' "'+'","'.join(finalWordClasses['place'])+'"[shape=box,style=filled];')
for w1 in bipRelations.keys():
print(' "'+w1+'"[fontsize='+str(round(10+math.log(ucwords[w1])))+'];');
for c, r in bipRelations.iteritems():
for p, v in r.iteritems():
print(' "'+c+'"--"'+p+'"[len='+str(1+(1/v))+', penwidth='+str(math.sqrt(v))+'];')
print("}")
else:
print("Plot impossible: no character found.");
if (len(benchmark)>0):
# print(bookfile+"\t"+str(sum(precision_by_classes[len(precision_by_classes)-1])/ncat)+"\t"+str(sum(recall_by_classes[len(recall_by_classes)-1])/ncat))
benchStr = bookfile+"\t"+str()+"\t"+str(WORD_FREQUENCE_THRESHOLD) #+"\t"+str(ucwtotcount)+"\t"+str(ucwtotunique)+"\t"+str(sorted_ucw[0][1])+"\t"+str(len(re.findall(r'\w+', fulltext)))
ps = []
rs = []
for idx in precision_by_classes.keys():
p = sum(precision_by_classes[idx])/ncat
ps.append(p)
r = sum(recall_by_classes[idx])/ncat
rs.append(r)
benchStr = benchStr+u"\t"+'{:0.3f}'.format(p)+"\t"+'{:0.3f}'.format(r)
# benchStr = benchStr+"\n--> Averages: "+str(sum(ps)/len(ps))+" / "+str(sum(rs)/len(rs))
print(benchStr)
if (api):
print(json.dumps(jsonOut))
################################################################################################################################################################
try:
opts, args = getopt.getopt(sys.argv[1:], "abc:df:ghsx:w:v", ["help", "benchmark", "graphs", "api", "file=", "focus=", "save", "mwclient=", "mincount="])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
sys.exit(2)
bookfile = u''
focus = u''
mwclienturl = u''
mwsite = False
benchmark = {}
dobenchmark = False
debug = False
verbose = False
graphs = False
api = False # API Mode, enable Web (full JSON) output
saveResults = False
for o, a in opts:
if o == "-d":
debug = True
elif o in ("-b", "--benchmark"):
dobenchmark = True
elif o in ("-g", "--graphs"):
graphs = True
elif o in ("-h", "--help"):
print("Options: -f:[abc:dgsx:w:v]")
print(" -h Help")
print(" -a API mode, output results in JSON format")
print(" -b Benchmark (against predefined reference file, see examples for formats and details)")
print(" -c Fixed min count (characters cited less than n times will be ignored)")
print(" -d DEBUG")
print(" -f Book source file, one chapter per line format (use autoformat.py for preprocessing)")
print(" -g Output as graphics")
print(" -s Save results (./cache folder needs to be writable)")
print(" -x Focus on one specific entity (useful when joined with DEBUG mode)")
print(" -v Verbose, print intermediary details")
print(" -w MWClient URL (typically fr.wikipedia.org)")
sys.exit()
elif o in ("-f", "--file"):
bookfile = a
elif o in ("-x", "--focus"):
focus = a
elif o in ("-v", "--verbose"):
verbose = True
elif o in ("-a", "--api"):
api = True
elif o in ("-s", "--save"):
saveResults = True
elif o in ("-c", "--mincount"):
if a=='auto':
dynamicFrequenceFilter = True
else:
WORD_FREQUENCE_THRESHOLD = int(a)
elif o in ("-w", "--mwclient"):
mwclienturl = a
mwsite = mwclient.Site(mwclienturl)
mwsite.compress = False
readCachedResults(mwsite)
else:
assert False, "unhandled option"
if (dobenchmark):
with codecs.open(bookfile[:-4]+'.corr', 'r', 'utf8') as f:
for i, raw_line in enumerate(f):
line = unicode(raw_line.strip()).split(u"\t")
if (len(line)>2):
if int(line[2])>=WORD_FREQUENCE_THRESHOLD:
benchmark[line[0]] = (line[1] if line[1] in ['character','place'] else 'other')
elif (len(line)>1):
benchmark[line[0]] = (line[1] if line[1] in ['character','place'] else 'other')
else:
print('Benchmark file error: line '+str(i)+' ignored.')
processBook(bookfile, mwsite, focus, benchmark, debug, verbose, graphs)
| gpl-2.0 |
mayblue9/bokeh | bokeh/server/blaze/config.py | 29 | 2291 | from __future__ import absolute_import
import logging
import warnings
from os.path import dirname, join
import numpy as np
import pandas as pd
from blaze import resource
import bokeh.server.tests
log = logging.getLogger(__name__)
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)
uniform = {'oneA': np.random.rand(qty),
'oneB': np.random.rand(qty),
'hundredA': np.random.rand(qty)*100,
'hundredB': np.random.rand(qty)*100}
uniform = pd.DataFrame(uniform)
bivariate = {'A1': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+1]),
'A2': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+2]),
'A3': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+3]),
'A4': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+4]),
'A5': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+5]),
'B': np.random.randn(qty),
'C': np.hstack([np.zeros(qty/2), np.ones(qty/2)])}
bivariate = pd.DataFrame(bivariate)
MESSAGE = """
Error loading hdfstore for %s.
Your version of Blaze is too old, or incompatible
or you have missing dependencies such as h5py and/or pytables.
"""
path = join(dirname(bokeh.server.tests.__file__), 'data', 'AAPL.hdf5')
try:
aapl = resource("hdfstore://%s::__data__" % path)
except Exception as e:
aapl = None
log.error(e)
warnings.warn(MESSAGE % "AAPL")
path = join(dirname(bokeh.server.tests.__file__), 'data', 'array.hdf5')
try:
arr = resource(path + "::" + "array")
except Exception as e:
arr = None
log.error(e)
warnings.warn(MESSAGE % "array")
path = join(dirname(bokeh.server.tests.__file__), 'data', 'CensusTracts.hdf5')
try:
census = resource("hdfstore://%s::__data__" % path)
except Exception as e:
census = None
log.error(e)
warnings.warn(MESSAGE % "CensusTracts")
data = dict(uniform=uniform,
gauss=gauss,
bivariate=bivariate)
if aapl:
data['aapl'] = aapl
if census:
data['census'] = census
if arr:
data['array'] = arr
| bsd-3-clause |
fbagirov/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
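# Illustrative behaviour of the tokenizer above (comment only, not executed):
#   number_aware_tokenizer(u"CPU 486 at 66MHz")
#   -> ['CPU', '#NUMBER', 'at', '#NUMBER']
# since any token whose first character is a digit or underscore is collapsed
# into the '#NUMBER' placeholder.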
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
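# bicluster_ncut(i) above returns cut / weight, where `weight` is the total
# tf-idf mass inside bicluster i and `cut` is the mass on entries linking the
# bicluster's rows (or columns) to the rest of the matrix; lower values mean a
# tighter, better-separated bicluster, which is why np.argsort is used below to
# pick the five smallest.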
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
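# For example (illustrative): most_common({'sci.med': 5, 'alt.atheism': 2})
# -> [('sci.med', 5), ('alt.atheism', 2)]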
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
maheshakya/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
mtp401/airflow | airflow/hooks/base_hook.py | 5 | 2004 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
    Abstract base class for hooks. Hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    objects that can handle the connection and interaction with specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
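    # Illustrative precedence (hypothetical values): if the environment defines
    #   AIRFLOW_CONN_MY_DB=postgres://user:pass@host:5432/schema
    # then get_connection('my_db') builds the Connection directly from that URI;
    # otherwise one of the matching rows returned by get_connections('my_db')
    # is chosen at random from the metadata database.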
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
IshankGulati/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | examples/compose/plot_transformed_target.py | 17 | 8265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
======================================================
Effect of transforming the targets in regression model
======================================================
In this example, we give an overview of
:class:`~sklearn.compose.TransformedTargetRegressor`. We use two examples
to illustrate the benefit of transforming the targets before learning a linear
regression model. The first example uses synthetic data while the second
example is based on the Ames housing data set.
"""
# Author: Guillaume Lemaitre <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV
from sklearn.compose import TransformedTargetRegressor
from sklearn.metrics import median_absolute_error, r2_score
from sklearn.utils.fixes import parse_version
# %%
# Synthetic example
##############################################################################
# `normed` is being deprecated in favor of `density` in histograms
if parse_version(matplotlib.__version__) >= parse_version('2.1'):
density_param = {'density': True}
else:
density_param = {'normed': True}
# %%
# A synthetic random regression dataset is generated. The targets ``y`` are
# modified by:
#
# 1. translating all targets such that all entries are
# non-negative (by adding the absolute value of the lowest ``y``) and
# 2. applying an exponential function to obtain non-linear
# targets which cannot be fitted using a simple linear model.
#
# Therefore, a logarithmic (`np.log1p`) and an exponential function
# (`np.expm1`) will be used to transform the targets before training a linear
# regression model and using it for prediction.
X, y = make_regression(n_samples=10000, noise=100, random_state=0)
y = np.expm1((y + abs(y.min())) / 200)
y_trans = np.log1p(y)
# %%
# Below we plot the probability density functions of the target
# before and after applying the logarithmic functions.
f, (ax0, ax1) = plt.subplots(1, 2)
ax0.hist(y, bins=100, **density_param)
ax0.set_xlim([0, 2000])
ax0.set_ylabel('Probability')
ax0.set_xlabel('Target')
ax0.set_title('Target distribution')
ax1.hist(y_trans, bins=100, **density_param)
ax1.set_ylabel('Probability')
ax1.set_xlabel('Target')
ax1.set_title('Transformed target distribution')
f.suptitle("Synthetic data", y=0.06, x=0.53)
f.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# %%
# At first, a linear model will be applied on the original targets. Due to the
# non-linearity, the model trained will not be precise during
# prediction. Subsequently, a logarithmic function is used to linearize the
# targets, allowing better prediction even with a similar linear model as
# reported by the median absolute error (MAE).
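# Roughly, the TransformedTargetRegressor(regressor, func=np.log1p,
# inverse_func=np.expm1) used below behaves like (illustrative sketch only):
#   regressor.fit(X_train, np.log1p(y_train))      # fit on transformed targets
#   y_pred = np.expm1(regressor.predict(X_test))   # invert at predict time
# so the linear model sees an (approximately) linearized target.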
f, (ax0, ax1) = plt.subplots(1, 2, sharey=True)
# Use linear model
regr = RidgeCV()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
# Plot results
ax0.scatter(y_test, y_pred)
ax0.plot([0, 2000], [0, 2000], '--k')
ax0.set_ylabel('Target predicted')
ax0.set_xlabel('True Target')
ax0.set_title('Ridge regression \n without target transformation')
ax0.text(100, 1750, r'$R^2$=%.2f, MAE=%.2f' % (
r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)))
ax0.set_xlim([0, 2000])
ax0.set_ylim([0, 2000])
# Transform targets and use same linear model
regr_trans = TransformedTargetRegressor(regressor=RidgeCV(),
func=np.log1p,
inverse_func=np.expm1)
regr_trans.fit(X_train, y_train)
y_pred = regr_trans.predict(X_test)
ax1.scatter(y_test, y_pred)
ax1.plot([0, 2000], [0, 2000], '--k')
ax1.set_ylabel('Target predicted')
ax1.set_xlabel('True Target')
ax1.set_title('Ridge regression \n with target transformation')
ax1.text(100, 1750, r'$R^2$=%.2f, MAE=%.2f' % (
r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)))
ax1.set_xlim([0, 2000])
ax1.set_ylim([0, 2000])
f.suptitle("Synthetic data", y=0.035)
f.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])
# %%
# Real-world data set
###############################################################################
#
# In a similar manner, the Ames housing data set is used to show the impact
# of transforming the targets before learning a model. In this example, the
# target to be predicted is the selling price of each house.
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import QuantileTransformer, quantile_transform
ames = fetch_openml(name="house_prices", as_frame=True)
# Keep only numeric columns
X = ames.data.select_dtypes(np.number)
# Remove columns with NaN or Inf values
X = X.drop(columns=['LotFrontage', 'GarageYrBlt', 'MasVnrArea'])
y = ames.target
y_trans = quantile_transform(y.to_frame(),
n_quantiles=900,
output_distribution='normal',
copy=True).squeeze()
# %%
# A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize
# the target distribution before applying a
# :class:`~sklearn.linear_model.RidgeCV` model.
f, (ax0, ax1) = plt.subplots(1, 2)
ax0.hist(y, bins=100, **density_param)
ax0.set_ylabel('Probability')
ax0.set_xlabel('Target')
ax0.text(s='Target distribution', x=1.2e5, y=9.8e-6, fontsize=12)
ax0.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
ax1.hist(y_trans, bins=100, **density_param)
ax1.set_ylabel('Probability')
ax1.set_xlabel('Target')
ax1.text(s='Transformed target distribution', x=-6.8, y=0.479, fontsize=12)
f.suptitle("Ames housing data: selling price", y=0.04)
f.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# %%
# The effect of the transformer is weaker than on the synthetic data. However,
# the transformation results in an increase in :math:`R^2` and a large decrease
# in MAE. The residual plot (predicted target - true target vs predicted
# target) without target transformation takes on a curved, 'reverse smile'
# shape due to residual values that vary depending on the value of predicted
# target. With target transformation, the shape is more linear indicating
# better model fit.
f, (ax0, ax1) = plt.subplots(2, 2, sharey='row', figsize=(6.5, 8))
regr = RidgeCV()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
ax0[0].scatter(y_pred, y_test, s=8)
ax0[0].plot([0, 7e5], [0, 7e5], '--k')
ax0[0].set_ylabel('True target')
ax0[0].set_xlabel('Predicted target')
ax0[0].text(s='Ridge regression \n without target transformation', x=-5e4,
y=8e5, fontsize=12, multialignment='center')
ax0[0].text(3e4, 64e4, r'$R^2$=%.2f, MAE=%.2f' % (
r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)))
ax0[0].set_xlim([0, 7e5])
ax0[0].set_ylim([0, 7e5])
ax0[0].ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
ax1[0].scatter(y_pred, (y_pred - y_test), s=8)
ax1[0].set_ylabel('Residual')
ax1[0].set_xlabel('Predicted target')
ax1[0].ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
regr_trans = TransformedTargetRegressor(
regressor=RidgeCV(),
transformer=QuantileTransformer(n_quantiles=900,
output_distribution='normal'))
regr_trans.fit(X_train, y_train)
y_pred = regr_trans.predict(X_test)
ax0[1].scatter(y_pred, y_test, s=8)
ax0[1].plot([0, 7e5], [0, 7e5], '--k')
ax0[1].set_ylabel('True target')
ax0[1].set_xlabel('Predicted target')
ax0[1].text(s='Ridge regression \n with target transformation', x=-5e4,
y=8e5, fontsize=12, multialignment='center')
ax0[1].text(3e4, 64e4, r'$R^2$=%.2f, MAE=%.2f' % (
r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)))
ax0[1].set_xlim([0, 7e5])
ax0[1].set_ylim([0, 7e5])
ax0[1].ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
ax1[1].scatter(y_pred, (y_pred - y_test), s=8)
ax1[1].set_ylabel('Residual')
ax1[1].set_xlabel('Predicted target')
ax1[1].ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
f.suptitle("Ames housing data: selling price", y=0.035)
plt.show()
| bsd-3-clause |
CCS-Lab/hBayesDM | Python/hbayesdm/models/_ra_noLA.py | 1 | 9788 | from typing import Sequence, Union, Any
from collections import OrderedDict
from numpy import Inf, exp
import pandas as pd
from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import ra_preprocess_func
__all__ = ['ra_noLA']
class RaNola(TaskModel):
def __init__(self, **kwargs):
super().__init__(
task_name='ra',
model_name='noLA',
model_type='',
data_columns=(
'subjID',
'gain',
'loss',
'cert',
'gamble',
),
parameters=OrderedDict([
('rho', (0, 1, 2)),
('tau', (0, 1, 30)),
]),
regressors=OrderedDict([
]),
postpreds=['y_pred'],
parameters_desc=OrderedDict([
('rho', 'risk aversion'),
('tau', 'inverse temperature'),
]),
additional_args_desc=OrderedDict([
]),
**kwargs,
)
_preprocess_func = ra_preprocess_func
def ra_noLA(
data: Union[pd.DataFrame, str, None] = None,
niter: int = 4000,
nwarmup: int = 1000,
nchain: int = 4,
ncore: int = 1,
nthin: int = 1,
inits: Union[str, Sequence[float]] = 'vb',
ind_pars: str = 'mean',
model_regressor: bool = False,
vb: bool = False,
inc_postpred: bool = False,
adapt_delta: float = 0.95,
stepsize: float = 1,
max_treedepth: int = 10,
**additional_args: Any) -> TaskModel:
"""Risk Aversion Task - Prospect Theory, without loss aversion (LA) parameter
Hierarchical Bayesian Modeling of the Risk Aversion Task
using Prospect Theory, without loss aversion (LA) parameter [Sokol-Hessner2009]_ with the following parameters:
"rho" (risk aversion), "tau" (inverse temperature).
.. [Sokol-Hessner2009] Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & Smith, E. E. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. Proceedings of the National Academy of Sciences of the United States of America, 106(13), 5035-5040. https://www.pnas.org/content/106/13/5035
User data should contain the behavioral data-set of all subjects of interest for
the current analysis. When loading from a file, the datafile should be a
**tab-delimited** text file, whose rows represent trial-by-trial observations
and columns represent variables.
For the Risk Aversion Task, there should be 5 columns of data
with the labels "subjID", "gain", "loss", "cert", "gamble". It is not necessary for the columns to be
in this particular order; however, it is necessary that they be labeled
correctly and contain the information below:
- "subjID": A unique identifier for each subject in the data-set.
- "gain": Possible (50\%) gain outcome of a risky option (e.g. 9).
- "loss": Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).
- "cert": Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.
- "gamble": If gamble was taken, gamble == 1; else gamble == 0.
.. note::
User data may contain other columns of data (e.g. ``ReactionTime``,
``trial_number``, etc.), but only the data within the column names listed
above will be used during the modeling. As long as the necessary columns
mentioned above are present and labeled correctly, there is no need to
remove other miscellaneous data columns.
.. note::
``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that
give the user more control over Stan's MCMC sampler. It is recommended that
only advanced users change the default values, as alterations can profoundly
change the sampler's behavior. See [Hoffman2014]_ for more information on the
sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm
Parameters' of the `Stan User's Guide and Reference Manual`__.
.. [Hoffman2014]
Hoffman, M. D., & Gelman, A. (2014).
The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
__ https://mc-stan.org/users/documentation/
Parameters
----------
data
Data to be modeled. It should be given as a Pandas DataFrame object,
a filepath for a data file, or ``"example"`` for example data.
Data columns should be labeled as: "subjID", "gain", "loss", "cert", "gamble".
niter
Number of iterations, including warm-up. Defaults to 4000.
nwarmup
Number of iterations used for warm-up only. Defaults to 1000.
``nwarmup`` is a numerical value that specifies how many MCMC samples
should not be stored upon the beginning of each chain. For those
familiar with Bayesian methods, this is equivalent to burn-in samples.
Due to the nature of the MCMC algorithm, initial values (i.e., where the
sampling chains begin) can have a heavy influence on the generated
posterior distributions. The ``nwarmup`` argument can be set to a
higher number in order to curb the effects that initial values have on
the resulting posteriors.
nchain
Number of Markov chains to run. Defaults to 4.
``nchain`` is a numerical value that specifies how many chains (i.e.,
independent sampling sequences) should be used to draw samples from
the posterior distribution. Since the posteriors are generated from a
sampling process, it is good practice to run multiple chains to ensure
that a reasonably representative posterior is attained. When the
sampling is complete, it is possible to check the multiple chains for
convergence by running the following line of code:
.. code:: python
output.plot(type='trace')
ncore
Number of CPUs to be used for running. Defaults to 1.
nthin
Every ``nthin``-th sample will be used to generate the posterior
distribution. Defaults to 1. A higher number can be used when
auto-correlation within the MCMC sampling is high.
``nthin`` is a numerical value that specifies the "skipping" behavior
of the MCMC sampler. That is, only every ``nthin``-th sample is used to
generate posterior distributions. By default, ``nthin`` is equal to 1,
meaning that every sample is used to generate the posterior.
inits
String or list specifying how the initial values should be generated.
Options are ``'fixed'`` or ``'random'``, or your own initial values.
ind_pars
String specifying how to summarize the individual parameters.
Current options are: ``'mean'``, ``'median'``, or ``'mode'``.
model_regressor
Whether to export model-based regressors. Currently not available for this model.
vb
Whether to use variational inference to approximately draw from a
posterior distribution. Defaults to ``False``.
inc_postpred
Include trial-level posterior predictive simulations in
model output (may greatly increase file size). Defaults to ``False``.
adapt_delta
Floating point value representing the target acceptance probability of a new
sample in the MCMC chain. Must be between 0 and 1. See note below.
stepsize
Integer value specifying the size of each leapfrog step that the MCMC sampler
can take on each new iteration. See note below.
max_treedepth
Integer value specifying how many leapfrog steps the MCMC sampler can take
on each new iteration. See note below.
**additional_args
Not used for this model.
Returns
-------
model_data
An ``hbayesdm.TaskModel`` instance with the following components:
- ``model``: String value that is the name of the model ('ra_noLA').
- ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
(as specified by ``ind_pars``) for each subject.
- ``par_vals``: OrderedDict holding the posterior samples over different parameters.
- ``fit``: A PyStan StanFit object that contains the fitted Stan model.
- ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
as specified by the user.
Examples
--------
.. code:: python
from hbayesdm import rhat, print_fit
from hbayesdm.models import ra_noLA
# Run the model and store results in "output"
output = ra_noLA(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)
# Visually check convergence of the sampling chains (should look like "hairy caterpillars")
output.plot(type='trace')
# Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
output.plot()
# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output, less=1.1)
# Show the LOOIC and WAIC model fit estimates
print_fit(output)
"""
return RaNola(
data=data,
niter=niter,
nwarmup=nwarmup,
nchain=nchain,
ncore=ncore,
nthin=nthin,
inits=inits,
ind_pars=ind_pars,
model_regressor=model_regressor,
vb=vb,
inc_postpred=inc_postpred,
adapt_delta=adapt_delta,
stepsize=stepsize,
max_treedepth=max_treedepth,
**additional_args)
| gpl-3.0 |
peterk87/sistr_cmd | sistr/src/serovar_prediction/__init__.py | 1 | 24519 | import logging
import pandas as pd
from sistr.src.blast_wrapper import BlastReader
from sistr.src.serovar_prediction.constants import \
FLJB_FASTA_PATH, \
FLIC_FASTA_PATH, \
H2_FLJB_SIMILARITY_GROUPS, \
H1_FLIC_SIMILARITY_GROUPS, \
WZY_FASTA_PATH, \
WZX_FASTA_PATH, \
SEROGROUP_SIMILARITY_GROUPS, \
SEROVAR_TABLE_PATH, \
CGMLST_DISTANCE_THRESHOLD, MASH_DISTANCE_THRESHOLD, SISTR_DATA_DIR, SISTR_DB_URL
spp_name_to_roman = {'enterica': 'I',
'salamae': 'II',
'arizonae': 'IIIa',
'diarizonae': 'IIIb',
'houtenae': 'IV',
'bongori': 'V',
'indica': 'VI'}
class BlastResultMixin(object):
blast_results = None
top_result = None
is_trunc = False
is_missing = False
is_perfect_match = False
class WzxPrediction(BlastResultMixin):
serogroup = None
class WzyPrediction(BlastResultMixin):
serogroup = None
class SerogroupPrediction():
serogroup = None
wzx_prediction = None
wzy_prediction = None
class H1FliCPrediction(BlastResultMixin):
h1 = None
class H2FljBPrediction(BlastResultMixin):
h2 = None
class SerovarPrediction():
genome = None
serovar = None
serovar_cgmlst = None
cgmlst_distance = 1.0
cgmlst_matching_alleles = 0
cgmlst_found_loci = 0
cgmlst_genome_match = None
cgmlst_subspecies = None
serovar_antigen = None
serogroup = None
serogroup_prediction = None
h1 = None
h1_flic_prediction = None
h2 = None
h2_fljb_prediction = None
def get_antigen_name(qseqid):
"""
Get the antigen name from the BLASTN result query ID.
The last item delimited by | characters is the antigen name for all
antigens (H1, H2, serogroup)
@type qseqid: str
@param qseqid: BLASTN result query ID
@return: antigen name
"""
if qseqid:
return qseqid.split('|')[-1]
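# Illustrative example (hypothetical query ID, not taken from the reference data):
# get_antigen_name('fliC|gene|accession|d') would return 'd', the last '|'-delimited field.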
def serovar_table():
"""
Get the WHO 2007 Salmonella enterica serovar table with serogroup, H1 and
H2 antigen info as a Pandas DataFrame.
@return: Pandas DataFrame of serovar table
"""
return pd.read_csv(SEROVAR_TABLE_PATH)
class BlastAntigenGeneMixin:
def get_antigen_gene_blast_results(self, model_obj, antigen_gene_fasta,exclude=['N/A']):
blast_outfile = self.blast_runner.blast_against_query(antigen_gene_fasta)
blast_reader = BlastReader(blast_outfile,exclude)
is_missing = blast_reader.is_missing
model_obj.is_missing = is_missing
if not is_missing:
model_obj.blast_results = blast_reader.df_dict()
model_obj.top_result = blast_reader.top_result()
model_obj.is_perfect_match = blast_reader.is_perfect_match
model_obj.is_trunc = blast_reader.is_trunc
return model_obj
class SerogroupPredictor(BlastAntigenGeneMixin):
def __init__(self, blast_runner):
"""
SerogroupPredictor takes an initialized BlastRunner object whose temp
work folder has been created, the genome fasta copied in, and a BLASTN
DB built from it.
This class then queries wzx and wzy against the genome using the
BlastRunner to get the wzx and wzy serogroup predictions.
@type blast_runner: app.blast_wrapper.BlastRunner
@param blast_runner: Initialized BlastRunner object
"""
self.blast_runner = blast_runner
self.wzx_prediction = WzxPrediction()
self.wzy_prediction = WzyPrediction()
self.serogroup_prediction = SerogroupPrediction()
def search_for_wzx(self):
self.wzx_prediction = self.get_antigen_gene_blast_results(self.wzx_prediction, WZX_FASTA_PATH)
if not self.wzx_prediction.is_missing and self.wzx_prediction.top_result is not None:
top_result = self.wzx_prediction.top_result
top_result_pident = top_result['pident']
top_result_length = top_result['length']
if top_result_pident < 88.0:
self.wzx_prediction.is_missing = True
self.wzx_prediction.serogroup = None
return
if top_result_length < 300:
self.wzx_prediction.is_missing = True
self.wzx_prediction.serogroup = None
return
if (top_result_length >= 300 and top_result_length < 500) and top_result_pident < 99.0:
self.wzx_prediction.is_missing = True
self.wzx_prediction.serogroup = None
return
self.wzx_prediction.serogroup = get_antigen_name(top_result['qseqid'])
def search_for_wzy(self):
self.wzy_prediction = self.get_antigen_gene_blast_results(self.wzy_prediction, WZY_FASTA_PATH)
if not self.wzy_prediction.is_missing and self.wzy_prediction.top_result is not None:
top_result = self.wzy_prediction.top_result
top_result_pident = top_result['pident']
top_result_length = top_result['length']
if top_result_pident < 88.0:
self.wzy_prediction.is_missing = True
self.wzy_prediction.serogroup = None
return
if top_result_length < 300:
self.wzy_prediction.is_missing = True
self.wzy_prediction.serogroup = None
return
if (top_result_length >= 300 and top_result_length < 500) and top_result_pident < 99.0:
self.wzy_prediction.is_missing = True
self.wzy_prediction.serogroup = None
return
self.wzy_prediction.serogroup = get_antigen_name(top_result['qseqid'])
def predict(self):
self.search_for_wzx()
self.search_for_wzy()
self.serogroup_prediction.wzx_prediction = self.wzx_prediction
self.serogroup_prediction.wzy_prediction = self.wzy_prediction
if self.wzx_prediction.is_perfect_match:
self.serogroup_prediction.serogroup = self.wzx_prediction.serogroup
if self.wzy_prediction.is_perfect_match:
self.serogroup_prediction.serogroup = self.wzy_prediction.serogroup
if self.wzy_prediction.is_perfect_match or self.wzx_prediction.is_perfect_match:
return
if self.wzx_prediction.is_missing and self.wzy_prediction.is_missing:
return
if self.wzx_prediction.is_missing:
self.serogroup_prediction.serogroup = self.wzy_prediction.serogroup
return
if self.wzy_prediction.is_missing:
self.serogroup_prediction.serogroup = self.wzx_prediction.serogroup
return
if self.wzy_prediction.serogroup == self.wzx_prediction.serogroup:
self.serogroup_prediction.serogroup = self.wzx_prediction.serogroup
return
top_wzy_result = self.wzy_prediction.top_result
top_wzx_result = self.wzx_prediction.top_result
wzx_bitscore = 0
wzy_bitscore = 0
if top_wzx_result is not None:
wzx_cov = top_wzx_result['coverage']
wzx_pident = top_wzx_result['pident']
wzx_bitscore = top_wzx_result['bitscore']
if top_wzy_result is not None:
wzy_cov = top_wzy_result['coverage']
wzy_pident = top_wzy_result['pident']
wzy_bitscore = top_wzy_result['bitscore']
if wzx_bitscore >= wzy_bitscore:
self.serogroup_prediction.serogroup = self.wzx_prediction.serogroup
else:
self.serogroup_prediction.serogroup = self.wzy_prediction.serogroup
class H1Predictor(BlastAntigenGeneMixin):
def __init__(self, blast_runner):
self.blast_runner = blast_runner
self.h1_prediction = H1FliCPrediction()
def predict(self,filter=['N/A']):
self.h1_prediction = self.get_antigen_gene_blast_results(self.h1_prediction, FLIC_FASTA_PATH,filter)
if not self.h1_prediction.is_missing and self.h1_prediction.top_result is not None:
if not self.h1_prediction.is_perfect_match:
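                # No perfect BLAST hit: fall back to filtered results, first allowing
                # up to 25 mismatches on long (>=700 bp) alignments, then requiring a
                # mismatch-free alignment of at least 400 bp before giving up.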
df_blast_results = pd.DataFrame(self.h1_prediction.blast_results)
df_blast_results = df_blast_results[
(df_blast_results['mismatch'] <= 25) & (df_blast_results['length'] >= 700)]
if df_blast_results.shape[0] == 0:
df_blast_results = pd.DataFrame(self.h1_prediction.blast_results)
df_blast_results = df_blast_results[
(df_blast_results['mismatch'] <= 0) & (df_blast_results['length'] >= 400)]
if df_blast_results.shape[0] == 0:
self.h1_prediction.is_missing = True
self.h1_prediction.top_result = None
self.h1_prediction.h1 = None
return
df_blast_results_over1000 = df_blast_results[
(df_blast_results['mismatch'] <= 5) & (df_blast_results['length'] >= 1000)]
if df_blast_results_over1000.shape[0] > 0:
df_blast_results = df_blast_results_over1000.sort_values(by='mismatch')
else:
df_blast_results = df_blast_results.sort_values(by='bitscore', ascending=False)
result_dict = BlastReader.df_first_row_to_dict(df_blast_results)
result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
qend=result_dict['qend'],
sstart=result_dict['sstart'],
send=result_dict['send'],
qlen=result_dict['qlen'],
slen=result_dict['slen'])
self.h1_prediction.top_result = result_dict
self.h1_prediction.is_trunc = result_trunc
self.h1_prediction.h1 = get_antigen_name(self.h1_prediction.top_result['qseqid'])
class H2Predictor(BlastAntigenGeneMixin):
def __init__(self, blast_runner):
self.blast_runner = blast_runner
self.h2_prediction = H2FljBPrediction()
def predict(self,filter=['N/A']):
self.h2_prediction = self.get_antigen_gene_blast_results(self.h2_prediction, FLJB_FASTA_PATH,filter)
if not self.h2_prediction.is_missing and self.h2_prediction.top_result is not None:
if not self.h2_prediction.is_perfect_match :
top_result = self.h2_prediction.top_result
match_len = top_result['length']
pident = top_result['pident']
df_blast_results = pd.DataFrame(self.h2_prediction.blast_results)
df_blast_results = df_blast_results[
(df_blast_results['mismatch'] <= 50) & (df_blast_results['length'] >= 700)]
if df_blast_results.shape[0] == 0:
df_blast_results = pd.DataFrame(self.h2_prediction.blast_results)
df_blast_results = df_blast_results[
(df_blast_results['mismatch'] <= 0) & (df_blast_results['length'] >= 600)]
if df_blast_results.shape[0] == 0:
self.h2_prediction.is_missing = True
self.h2_prediction.top_result = None
self.h2_prediction.h2 = '-'
return
# short lower %ID matches are treated as missing or '-' for H2
if match_len <= 600 and pident < 88.0:
self.h2_prediction.h2 = '-'
self.h2_prediction.is_missing = True
return
df_blast_results_over1000 = df_blast_results[
(df_blast_results['mismatch'] <= 5) & (df_blast_results['length'] >= 1000)]
if df_blast_results_over1000.shape[0] > 0:
df_blast_results = df_blast_results_over1000.sort_values(by='mismatch')
else:
df_blast_results = df_blast_results.sort_values(by='bitscore', ascending=False)
result_dict = BlastReader.df_first_row_to_dict(df_blast_results)
result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
qend=result_dict['qend'],
sstart=result_dict['sstart'],
send=result_dict['send'],
qlen=result_dict['qlen'],
slen=result_dict['slen'])
self.h2_prediction.top_result = result_dict
self.h2_prediction.is_trunc = result_trunc
self.h2_prediction.h2 = get_antigen_name(self.h2_prediction.top_result['qseqid'])
if self.h2_prediction.is_missing:
self.h2_prediction.h2 = '-'
class SerovarPredictor:
serogroup = None
h1 = None
h2 = None
serovar = None
subspecies = None
def __init__(self, blast_runner, subspecies):
"""
"""
self.blast_runner = blast_runner
self.subspecies = subspecies
self.serogroup_predictor = SerogroupPredictor(self.blast_runner)
self.h1_predictor = H1Predictor(self.blast_runner)
self.h2_predictor = H2Predictor(self.blast_runner)
def predict_antigens(self):
self.h1_predictor.predict()
self.h2_predictor.predict()
self.serogroup_predictor.predict()
self.h1 = self.h1_predictor.h1_prediction.h1
self.h2 = self.h2_predictor.h2_prediction.h2
self.serogroup = self.serogroup_predictor.serogroup_prediction.serogroup
return self.serogroup, self.h1, self.h2
@staticmethod
def get_serovar(df, sg, h1, h2, spp):
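        # Subset the WHO serovar table to rows whose serogroup, H1 and H2 match the
        # candidate antigen lists (a missing H2, '-', matches serovars that can lack H2)
        # and, when a subspecies is given, restrict to that subspecies; matching
        # serovar names are returned joined by '|'.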
h2_is_missing = '-' in h2
b_sg = df['Serogroup'].isin(sg)
b_h1 = df['H1'].isin(h1)
if h2_is_missing:
b_h2 = df['can_h2_be_missing']
else:
b_h2 = df['H2'].isin(h2)
if spp is not None:
b_spp = df['subspecies'] == spp
else:
b_spp = b_sg
df_prediction = df[(b_spp & b_sg & b_h1 & b_h2)]
logging.debug('Serovar prediction for %s %s:%s:%s is %s', spp, sg, h1, h2, list(df_prediction['Serovar']))
if df_prediction.shape[0] > 0:
return '|'.join(list(df_prediction['Serovar']))
@staticmethod
def lookup_serovar_antigens(df, serovar):
df_prediction = df.loc[df['Serovar'] == serovar]
spp = df_prediction['subspecies'].values.item(0)
sg = df_prediction['Serogroup'].values.item(0)
h1 = df_prediction['H1'].values.item(0)
h2 = df_prediction['H2'].values.item(0)
logging.debug('Serovar antigens for %s are: %s %s:%s:%s', serovar, spp, sg, h1, h2)
return {'spp':spp,'sg':sg,'h1':h1,'h2':h2}
def predict_serovar_from_antigen_blast(self):
if not self.serogroup or not self.h2 or not self.h1:
self.predict_antigens()
df = serovar_table()
sg = self.serogroup
h1 = self.h1
h2 = self.h2
# no antigen results then serovar == '-:-:-'
if sg is None \
and h1 is None \
and h2 == '-':
self.serovar = '-:-:-'
return self.serovar
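        # Expand each antigen call to its similarity group (antigens that cannot be
        # reliably distinguished are looked up together); an antigen with no call is
        # matched against every value in the serovar table for that column.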
for sg_groups in SEROGROUP_SIMILARITY_GROUPS:
if sg in sg_groups:
sg = sg_groups
break
if sg is None:
sg = list(df['Serogroup'].unique())
if not isinstance(sg, list):
sg = [sg]
for h1_groups in H1_FLIC_SIMILARITY_GROUPS:
if h1 is None or h1 == '-':
break
if h1 in h1_groups:
h1 = h1_groups
break
if h1 is None:
h1 = list(df['H1'].unique())
if not isinstance(h1, list):
h1 = [h1]
for h2_groups in H2_FLJB_SIMILARITY_GROUPS:
if h2 is None or h2 == '-':
break
if h2 in h2_groups:
h2 = h2_groups
break
if not isinstance(h2, list):
h2 = [h2]
self.serovar = SerovarPredictor.get_serovar(df, sg, h1, h2, self.subspecies)
if self.serovar is None:
try:
spp_roman = spp_name_to_roman[self.subspecies]
except KeyError:
spp_roman = None
from collections import Counter
c = Counter(df.O_antigen[df.Serogroup.isin(sg)])
temp_o = c.most_common()
# Use the most common O antigen among the candidate serogroups when one exists.
if temp_o:
o_antigen = temp_o[0][0]
else:
o_antigen = sg.pop()
h1_first = h1[0]
h2_first = h2[0]
if spp_roman:
self.serovar = '{} {}:{}:{}'.format(spp_roman, o_antigen, self.h1, self.h2)
else:
self.serovar = '{}:{}:{}'.format(o_antigen, self.h1, self.h2)
return self.serovar
def get_serovar_prediction(self):
serovar_pred = SerovarPrediction()
sg_pred = self.serogroup_predictor.serogroup_prediction
h1_pred = self.h1_predictor.h1_prediction
h2_pred = self.h2_predictor.h2_prediction
serovar_pred.serogroup_prediction = sg_pred
serovar_pred.serogroup = self.serogroup
serovar_pred.h1_flic_prediction = h1_pred
serovar_pred.h1 = self.h1
serovar_pred.h2_fljb_prediction = h2_pred
serovar_pred.h2 = self.h2
return serovar_pred
def overall_serovar_call(serovar_prediction, antigen_predictor):
"""
Predict serovar from cgMLST cluster membership analysis and antigen BLAST results.
SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results.
Antigen BLAST results will predict a particular serovar or list of serovars, however,
the cgMLST membership may be able to help narrow down the list of potential serovars.
Notes:
If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars,
then the serovar is assigned the cgMLST predicted serovar.
If all antigens are found but no matching antigen serovar is found, then the serovar is assigned
a pseudo-antigenic formula (Serogroup:H1:H2); otherwise the serovar is assigned the cgMLST prediction.
If the antigen predicted serovar does not match the cgMLST predicted serovar,
- the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less)
- otherwise, the serovar is the antigen predicted serovar(s)
Args:
serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash])
antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results
Returns:
src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST
"""
assert isinstance(serovar_prediction, SerovarPrediction)
assert isinstance(antigen_predictor, SerovarPredictor)
h1 = antigen_predictor.h1
h2 = antigen_predictor.h2
sg = antigen_predictor.serogroup
spp = serovar_prediction.cgmlst_subspecies
if spp is None:
if 'mash_match' in serovar_prediction.__dict__:
spp = serovar_prediction.__dict__['mash_subspecies']
serovar_prediction.serovar_antigen = antigen_predictor.serovar
cgmlst_serovar = serovar_prediction.serovar_cgmlst
cgmlst_distance = float(serovar_prediction.cgmlst_distance)
h1_h2_share_group = False
for h2_groups in H2_FLJB_SIMILARITY_GROUPS:
if h1 in h2_groups and h2 in h2_groups:
h1_h2_share_group = True
break
if(h1_h2_share_group and h1 != '-' and cgmlst_serovar is not None):
cgmlst_serovar_antigens = antigen_predictor.lookup_serovar_antigens(serovar_table(),cgmlst_serovar)
h1_in_h2_similarity_groups = False
for h2_groups in H2_FLJB_SIMILARITY_GROUPS:
if cgmlst_serovar_antigens['h1'] in h2_groups:
h1_in_h2_similarity_groups = True
groups = h2_groups
break
h2_in_h1_similarity_groups = False
for h1_groups in H1_FLIC_SIMILARITY_GROUPS:
if cgmlst_serovar_antigens['h2'] in h1_groups:
h2_in_h1_similarity_groups = True
groups = h1_groups
break
if antigen_predictor.serogroup is None:
antigen_predictor.serogroup = '-'
if(h1_in_h2_similarity_groups):
temp = H2Predictor(antigen_predictor.blast_runner)
temp.predict(filter=groups)
antigen_predictor.h2_predictor = temp
h2 = temp.h2_prediction.h2
if h2 is None:
h2 = '-'
antigen_predictor.h2 = h2
serovar_prediction.h2 = h2
serovar_prediction.h2_fljb_prediction.h2 = h2
elif(h2_in_h1_similarity_groups):
temp = H1Predictor(antigen_predictor.blast_runner)
temp.predict(filter=groups)
antigen_predictor.h1_predictor = temp
h1 = temp.h1_prediction.h1
if h1 is None:
h1 = '-'
antigen_predictor.h1 = h1
serovar_prediction.h1 = h1
serovar_prediction.h1_flic_prediction.h1 = h1
antigen_predictor.predict_serovar_from_antigen_blast()
serovar_prediction.serovar_antigen = antigen_predictor.serovar
null_result = '-:-:-'
try:
spp_roman = spp_name_to_roman[spp]
except KeyError:
spp_roman = None
is_antigen_null = lambda x: (x is None or x == '' or x == '-')
if antigen_predictor.serovar is None:
if is_antigen_null(sg) and is_antigen_null(h1) and is_antigen_null(h2):
if spp_roman is not None:
serovar_prediction.serovar = '{} {}:{}:{}'.format(spp_roman, sg, h1, h2)
else:
serovar_prediction.serovar = '{}:{}:{}'.format(sg, h1, h2)
elif cgmlst_serovar is not None and cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
else:
serovar_prediction.serovar = null_result
if 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_dist = float(spd['mash_distance'])
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = spd['mash_serovar']
else:
serovars_from_antigen = antigen_predictor.serovar.split('|')
if not isinstance(serovars_from_antigen, list):
serovars_from_antigen = [serovars_from_antigen]
if cgmlst_serovar is not None:
if cgmlst_serovar in serovars_from_antigen:
serovar_prediction.serovar = cgmlst_serovar
elif 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_serovar = spd['mash_serovar']
mash_dist = float(spd['mash_distance'])
if mash_serovar in serovars_from_antigen:
serovar_prediction.serovar = mash_serovar
else:
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = mash_serovar
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
if serovar_prediction.h1 is None:
serovar_prediction.h1 = '-'
if serovar_prediction.h2 is None:
serovar_prediction.h2 = '-'
if serovar_prediction.serogroup is None:
serovar_prediction.serogroup = '-'
if serovar_prediction.serovar_antigen is None:
if spp_roman is not None:
serovar_prediction.serovar_antigen = '{} -:-:-'.format(spp_roman)
else:
serovar_prediction.serovar_antigen = '-:-:-'
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
return serovar_prediction | apache-2.0 |
kennethdecker/MagnePlane | src/hyperloop/Python/pod/drag.py | 4 | 1582 | from __future__ import print_function
import numpy as np
from scipy import interpolate as interp
from openmdao.api import IndepVarComp, Component, Group, Problem
import matplotlib.pylab as plt
class Drag(Component):
'''
Notes
-------
Interpolates the drag coefficient of the pod using Mach number vs. drag coefficient
data from CFD. The component interpolates the drag coefficient based on the pod Mach number.
Params
-------
pod_mach : float
Pod Mach number. Default value is .8
mach_array : array
Array of Mach numbers corresponding to the CFD data points (hard-coded in ``solve_nonlinear``).
cd_array : array
Array of drag coefficient values at the corresponding Mach numbers from CFD (hard-coded in ``solve_nonlinear``).
Returns
-------
Cd : float
Interpolated drag coefficient based on pod mach number.
'''
def __init__(self):
super(Drag, self).__init__()
self.add_param('pod_mach', val = .8, desc = 'Pod Mach Number', units = 'unitless')
self.add_output('Cd', val = 1.0, desc = 'Drag Coefficient', units = 'unitless')
def solve_nonlinear(self, p, u, r):
mach_array = np.array([ 0.5 , 0.6 , 0.625, 0.65 , 0.675, 0.7 , 0.725])
cd_array = np.array([ 0.04241176, 0.03947743, 0.04061261, 0.04464372, 0.05726695,
0.07248304, 0.08451007])
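        # Fit a smoothing spline through the CFD (Mach, Cd) points and evaluate it
        # at the pod Mach number to get the interpolated drag coefficient.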
f = interp.UnivariateSpline(mach_array, cd_array)
u['Cd'] = float(f(p['pod_mach']))
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
root.add('p', Drag())
root.add('p1', IndepVarComp('M_pod', .8))
root.connect('p1.M_pod', 'p.pod_mach')
top.setup()
top.run()
print(top['p.Cd']) | apache-2.0 |
HolgerPeters/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 143 | 9461 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
ilyes14/scikit-learn | sklearn/grid_search.py | 61 | 37197 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
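                # Decode the flat index like a mixed-radix number: each parameter's
                # value-list length acts as a digit base, and divmod peels off the
                # offset into that list.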
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
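# Illustrative usage sketch (not part of this module; dataset and parameter values
# are arbitrary, chosen only to show the call signature):
#
#     import numpy as np
#     from sklearn.svm import SVC
#     from sklearn.metrics.scorer import check_scoring
#     X = np.random.rand(20, 3)
#     y = np.random.randint(0, 2, 20)
#     train, test = np.arange(15), np.arange(15, 20)
#     scorer = check_scoring(SVC(), scoring='accuracy')
#     score, params, n_test = fit_grid_point(X, y, SVC(), {'C': 1.0},
#                                            train, test, scorer, verbose=0)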
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
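# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example of driving RandomizedSearchCV with scipy.stats distributions.
# The estimator, parameter names and n_iter below are arbitrary choices made
# for this example only.
def _demo_randomized_search():
    from scipy.stats import randint
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    iris = load_iris()
    param_distributions = {"max_depth": randint(1, 8),
                           "min_samples_leaf": randint(1, 10)}
    # Sample 20 candidate settings and evaluate each with the default CV scheme.
    search = RandomizedSearchCV(DecisionTreeClassifier(), param_distributions,
                                n_iter=20, random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_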
| bsd-3-clause |
lbishal/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert that the first 10 components' ratios agree between the 10- and 20-component fits
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
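# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): a minimal,
# hedged restatement of the identity exercised above -- explained_variance_ratio_
# is the per-component variance of the transformed data divided by the total
# variance of the (dense) input.  The data below is synthetic.
def _demo_explained_variance_ratio():
    import numpy as np
    from sklearn.decomposition import TruncatedSVD
    rng = np.random.RandomState(0)
    X_demo = rng.rand(40, 12)
    svd = TruncatedSVD(n_components=5, random_state=0).fit(X_demo)
    ratio = np.var(svd.transform(X_demo), axis=0) / np.var(X_demo, axis=0).sum()
    np.testing.assert_allclose(svd.explained_variance_ratio_, ratio, rtol=1e-5)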
| bsd-3-clause |
pradyu1993/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
from StringIO import StringIO  # used by plot_predictions; may already be provided by the wildcard imports above
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
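# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a minimal, hedged
# illustration of the tiling idea used by ShowConvNet.make_filter_fig above --
# square filters are packed into one big image with a one-pixel border between
# tiles.  The filter values, sizes and counts below are made up for the example.
def _demo_filter_grid(num_filters=16, filter_size=8, filters_per_row=4):
    import numpy
    rows = int(numpy.ceil(num_filters / float(filters_per_row)))
    filters = numpy.random.rand(num_filters, filter_size, filter_size)
    bigpic = numpy.zeros((rows * (filter_size + 1) + 1,
                          filters_per_row * (filter_size + 1) + 1))
    for m in range(num_filters):
        row, col = m // filters_per_row, m % filters_per_row
        top = 1 + row * (filter_size + 1)
        left = 1 + col * (filter_size + 1)
        bigpic[top:top + filter_size, left:left + filter_size] = filters[m]
    return bigpic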
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
466152112/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
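# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original example): a minimal, hedged
# illustration of the row normalization used above -- each row of a confusion
# matrix is divided by that class's support, turning counts into per-class
# fractions.  The matrix below is made up for the example.
def _demo_row_normalize():
    cm_demo = np.array([[13., 0., 0.],
                        [0., 10., 6.],
                        [0., 0., 9.]])
    return cm_demo / cm_demo.sum(axis=1)[:, np.newaxis]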
| bsd-3-clause |
mantidproject/mantid | scripts/HFIR_4Circle_Reduction/mplgraphicsview3d.py | 3 | 10025 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=R0901,R0902,R0904
import numpy as np
import os
from qtpy.QtWidgets import QSizePolicy
from mantidqt.MPLwidgets import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
class MplPlot3dCanvas(FigureCanvas):
"""
Matplotlib 3D canvas class
"""
def __init__(self, parent=None):
"""
Initialization
:return:
"""
#
self._myParentWindow = parent
# Initialize the figure
self._myFigure = Figure()
# Init canvas
FigureCanvas.__init__(self, self._myFigure)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
# Axes
self._myAxes = Axes3D(self._myFigure) # Canvas figure must be created for mouse rotation
self.format_coord_org = self._myAxes.format_coord
self._myAxes.format_coord = self.report_pixel
# color
self._colorMap = [0.5, 0.5, 0.5]
# Others
self._dataKey = 0
self._dataDict = dict()
# List of plots on canvas NOW
self._currPlotList = list()
self._currSurfaceList = list() # [{"xx":,"yy:","val:"}]
return
def clear_3d_plots(self):
"""
Clear all the figures from canvas
:return:
"""
for plt in self._currPlotList:
# del plt
self._myAxes.collections.remove(plt)
self._currPlotList = []
return
def get_data(self, data_key):
""" Get data by data key
:param data_key:
:return:
"""
assert data_key in self._dataDict, 'Data key %s does not exist in %s.' % (str(data_key),
str(self._dataDict.keys()))
return self._dataDict[data_key]
def import_3d_data(self, points, intensities):
"""
:param points:
:param intensities:
:return:
"""
# check
assert isinstance(points, np.ndarray) and points.shape[1] == 3, 'Shape is %s.' % str(points.shape)
assert isinstance(intensities, np.ndarray) and len(points) == len(intensities)
# set
self._dataDict[self._dataKey] = (points, intensities)
# update
r_value = self._dataKey
self._dataKey += 1
return r_value
def import_data_from_file(self, file_name):
""" File will have more than 4 columns, as X, Y, Z, Intensity, ...
:param file_name:
:return:
"""
# check
assert isinstance(file_name, str) and os.path.exists(file_name)
# parse
data_file = open(file_name, 'r')
raw_lines = data_file.readlines()
data_file.close()
# construct ND data array
xyz_points = np.zeros((len(raw_lines), 3))
intensities = np.zeros((len(raw_lines), ))
# parse
for i in range(len(raw_lines)):
line = raw_lines[i].strip()
# skip empty line
if len(line) == 0:
continue
# set value
terms = line.split(',')
for j in range(3):
xyz_points[i][j] = float(terms[j])
intensities[i] = float(terms[3])
# END-FOR
# Add to data structure for managing
self._dataDict[self._dataKey] = (xyz_points, intensities)
return_value = self._dataKey
self._dataKey += 1
return return_value
def plot_scatter(self, points, color_list):
"""
Plot points with colors in scatter mode
:param points:
:param color_list:
:return:
"""
# check: [TO DO] need MORE!
assert isinstance(points, np.ndarray)
assert len(points) == len(color_list)
assert points.shape[1] == 3, '3D data %s.' % str(points.shape)
#
# plot scatters
plt = self._myAxes.scatter(points[:, 0], points[:, 1], points[:, 2],
zdir='z', c=color_list)
self._currPlotList.append(plt)
self.draw()
return
def plot_scatter_auto(self, data_key, base_color=None):
"""
Plot data in scatter plot in an automatic mode
:param data_key: key to locate the data stored to this class
:param base_color: None or a list of 3 elements from 0 to 1 for RGB
:return:
"""
# Check
assert isinstance(data_key, int) and data_key >= 0
assert base_color is None or len(base_color) == 3
# get data and check
points = self._dataDict[data_key][0]
intensities = self._dataDict[data_key][1]
assert isinstance(points, np.ndarray)
assert isinstance(points.shape, tuple)
assert points.shape[1] == 3, '3D data %s.' % str(points.shape)
if len(points) > 1:
# set x, y and z limit
x_min = min(points[:, 0])
x_max = max(points[:, 0])
d_x = x_max - x_min
# print(x_min, x_max)
y_min = min(points[:, 1])
y_max = max(points[:, 1])
d_y = y_max - y_min
# print(y_min, y_max)
z_min = min(points[:, 2])
z_max = max(points[:, 2])
d_z = z_max - z_min
print(z_min, z_max)
# use default setup
self._myAxes.set_xlim(x_min-d_x, x_max+d_x)
self._myAxes.set_ylim(y_min-d_y, y_max+d_y)
self._myAxes.set_zlim(z_min-d_z, z_max+d_z)
# END-IF
# color map for intensity
color_list = list()
if base_color is None:
color_r = self._colorMap[0]
color_g = self._colorMap[1]
else:
color_r = base_color[0]
color_g = base_color[1]
if len(intensities) > 1:
min_intensity = min(intensities)
max_intensity = max(intensities)
diff = max_intensity - min_intensity
b_list = intensities - min_intensity
b_list = b_list/diff
num_points = len(points[:, 2])
for index in range(num_points):
color_tup = (color_r, color_g, b_list[index])
color_list.append(color_tup)
else:
color_list.append((color_r, color_g, 0.5))
# plot scatters
self._myAxes.scatter(points[:, 0], points[:, 1], points[:, 2], zdir='z', c=color_list)
self.draw()
def plot_surface(self):
"""
Plot surface
:return:
"""
print('Number of surf = ', len(self._currSurfaceList))
for surf in self._currSurfaceList:
plt = self._myAxes.plot_surface(surf["xx"], surf["yy"], surf["val"],
rstride=5, cstride=5, # color map??? cmap=cm.jet,
linewidth=1, antialiased=True)
self._currPlotList.append(plt)
# END-FOR
return
def report_pixel(self, x_d, y_d):
report = self.format_coord_org(x_d, y_d)
report = report.replace(",", " ")
return report
def set_axes_labels(self, x_label, y_label, z_label):
"""
:return:
"""
if x_label is not None:
self._myAxes.set_xlabel(x_label)
if y_label is not None:
self._myAxes.set_ylabel(y_label)
if z_label is not None:
self._myAxes.set_zlabel(z_label)
return
def set_color_map(self, color_r, color_g, color_b):
"""
Set the base line of color map
:param color_r:
:param color_g:
:param color_b:
:return:
"""
# Set color map
assert isinstance(color_r, float) and 0 <= color_r < 1., 'color_r must be a float in [0, 1)'
assert isinstance(color_g, float) and 0 <= color_g < 1., 'color_g must be a float in [0, 1)'
assert isinstance(color_b, float) and 0 <= color_b < 1., 'color_b must be a float in [0, 1)'
self._colorMap = [color_r, color_g, color_b]
def set_title(self, title, font_size):
"""
Set super title
:param title:
:return:
"""
self._myFigure.suptitle(title, fontsize=font_size)
return
def set_xyz_limits(self, points, limits=None):
""" Set XYZ axes limits
:param points:
:param limits: if None, then use default; otherwise, 3-tuple of 2-tuple
:return:
"""
# check
assert isinstance(points, np.ndarray)
# get limit
if limits is None:
limits = get_auto_xyz_limit(points)
# set limit to axes
self._myAxes.set_xlim(limits[0][0], limits[0][1])
self._myAxes.set_ylim(limits[1][0], limits[1][1])
self._myAxes.set_zlim(limits[2][0], limits[2][1])
return
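# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, hedged
# illustration of the colour mapping used in plot_scatter_auto above --
# intensities are rescaled to [0, 1] and fed into the blue channel while the
# red/green channels come from the base colour.  Assumes the intensities are
# a numpy array and are not all equal.
def _demo_intensity_colors(intensities, color_r=0.5, color_g=0.5):
    blues = (intensities - intensities.min()) / float(intensities.max() - intensities.min())
    return [(color_r, color_g, float(b)) for b in blues]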
def get_auto_xyz_limit(points):
""" Get default limit on X, Y, Z
Requirements: number of data points must be larger than 0.
:param points:
:return: 3-tuple of 2-tuple as (min, max) for X, Y and Z respectively
"""
# check
assert isinstance(points, np.ndarray)
dim = points.shape[1]
assert dim == 3
# set x, y and z limit
x_min = min(points[:, 0])
x_max = max(points[:, 0])
d_x = x_max - x_min
# print(x_min, x_max)
y_min = min(points[:, 1])
y_max = max(points[:, 1])
d_y = y_max - y_min
# print(y_min, y_max)
z_min = min(points[:, 2])
z_max = max(points[:, 2])
d_z = z_max - z_min
print(z_min, z_max)
# use default setup
x_lim = (x_min-d_x, x_max+d_x)
y_lim = (y_min-d_y, y_max+d_y)
z_lim = (z_min-d_z, z_max+d_z)
return x_lim, y_lim, z_lim
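# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): get_auto_xyz_limit
# pads each axis range by its own span on both sides.  A quick, hedged check on
# random points (the data is made up for the example):
def _demo_auto_xyz_limit():
    demo_points = np.random.rand(100, 3)
    x_lim, y_lim, z_lim = get_auto_xyz_limit(demo_points)
    # each returned pair is (min - span, max + span) for the corresponding axis
    return x_lim, y_lim, z_lim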
| gpl-3.0 |
bnaul/scikit-learn | sklearn/multioutput.py | 2 | 29459 | """
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: Tim Head <[email protected]>
# Author: Hugo Bowne-Anderson <[email protected]>
# Author: Chris Rivera <[email protected]>
# Author: Michael Williamson
# Author: James Ashton Nichols <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator, clone, MetaEstimatorMixin
from .base import RegressorMixin, ClassifierMixin, is_classifier
from .model_selection import cross_val_predict
from .utils import check_array, check_X_y, check_random_state
from .utils.metaestimators import if_delegate_has_method
from .utils.validation import (check_is_fitted, has_fit_parameter,
_check_fit_params, _deprecate_positional_args)
from .utils.multiclass import check_classification_targets
__all__ = ["MultiOutputRegressor", "MultiOutputClassifier",
"ClassifierChain", "RegressorChain"]
def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params):
estimator = clone(estimator)
if sample_weight is not None:
estimator.fit(X, y, sample_weight=sample_weight, **fit_params)
else:
estimator.fit(X, y, **fit_params)
return estimator
def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None,
first_time=True):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes,
sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
class _MultiOutputEstimator(MetaEstimatorMixin,
BaseEstimator,
metaclass=ABCMeta):
@abstractmethod
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
self.estimator = estimator
self.n_jobs = n_jobs
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets.
classes : list of ndarray of shape (n_outputs,)
Each array is unique classes for one output in str/int
Can be obtained via
``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
target matrix of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
"""
X, y = check_X_y(X, y,
multi_output=True,
accept_sparse=True)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
first_time = not hasattr(self, 'estimators_')
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_estimator)(
self.estimators_[i] if not first_time else self.estimator,
X, y[:, i],
classes[i] if classes is not None else None,
sample_weight, first_time) for i in range(y.shape[1]))
return self
def fit(self, X, y, sample_weight=None, **fit_params):
""" Fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets. An indicator matrix turns on multilabel
estimation.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
Returns
-------
self : object
"""
if not hasattr(self.estimator, "fit"):
raise ValueError("The base estimator should implement"
" a fit method")
X, y = self._validate_data(X, y, multi_output=True, accept_sparse=True)
if is_classifier(self):
check_classification_targets(y)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
fit_params_validated = _check_fit_params(X, fit_params)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_estimator)(
self.estimator, X, y[:, i], sample_weight,
**fit_params_validated)
for i in range(y.shape[1]))
return self
def predict(self, X):
"""Predict multi-output variable using a model
trained for each target variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
Returns
-------
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets predicted across multiple predictors.
Note: Separate models are generated for each predictor.
"""
check_is_fitted(self)
if not hasattr(self.estimator, "predict"):
raise ValueError("The base estimator should implement"
" a predict method")
X = check_array(X, accept_sparse=True)
y = Parallel(n_jobs=self.n_jobs)(
delayed(e.predict)(X)
for e in self.estimators_)
return np.asarray(y).T
def _more_tags(self):
return {'multioutput_only': True}
class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator):
"""Multi target regression
This strategy consists of fitting one regressor per target. This is a
simple strategy for extending regressors that do not natively support
multi-target regression.
.. versionadded:: 0.18
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit` and :term:`predict`.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for :meth:`fit`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
When individual estimators are fast to train or predict
using `n_jobs>1` can result in slower performance due
to the overhead of spawning processes.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
Attributes
----------
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_linnerud
>>> from sklearn.multioutput import MultiOutputRegressor
>>> from sklearn.linear_model import Ridge
>>> X, y = load_linnerud(return_X_y=True)
>>> clf = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y)
>>> clf.predict(X[[0]])
array([[176..., 35..., 57...]])
"""
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
super().__init__(estimator, n_jobs=n_jobs)
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
"""
super().partial_fit(
X, y, sample_weight=sample_weight)
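# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example of incremental multi-output regression via partial_fit.
# SGDRegressor is just one estimator exposing partial_fit; the data is synthetic.
def _demo_multioutput_partial_fit():
    from sklearn.linear_model import SGDRegressor
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 4)
    Y_demo = np.dot(X_demo, rng.rand(4, 3))      # three related targets
    model = MultiOutputRegressor(SGDRegressor(random_state=0))
    for start in range(0, 200, 50):              # feed the data in mini-batches
        stop = start + 50
        model.partial_fit(X_demo[start:stop], Y_demo[start:stop])
    return model.predict(X_demo[:2])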
class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator):
"""Multi target classification
This strategy consists of fitting one classifier per target. This is a
simple strategy for extending classifiers that do not natively support
multi-target classification
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit`, :term:`score` and
:term:`predict_proba`.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
It does each target variable in y in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels.
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> from sklearn.neighbors import KNeighborsClassifier
>>> X, y = make_multilabel_classification(n_classes=3, random_state=0)
>>> clf = MultiOutputClassifier(KNeighborsClassifier()).fit(X, y)
>>> clf.predict(X[-2:])
array([[1, 1, 0], [1, 1, 1]])
"""
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
super().__init__(estimator, n_jobs=n_jobs)
def fit(self, X, Y, sample_weight=None, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying classifier supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
Returns
-------
self : object
"""
super().fit(X, Y, sample_weight, **fit_params)
self.classes_ = [estimator.classes_ for estimator in self.estimators_]
return self
@property
def predict_proba(self):
"""Probability estimates.
Returns prediction probabilities for each class of each output.
This method will raise a ``ValueError`` if any of the
estimators do not have ``predict_proba``.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data
Returns
-------
p : array of shape (n_samples, n_classes), or a list of n_outputs \
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
.. versionchanged:: 0.19
This function now returns a list of arrays where the length of
the list is ``n_outputs``, and each array is (``n_samples``,
``n_classes``) for that particular output.
"""
check_is_fitted(self)
if not all([hasattr(estimator, "predict_proba")
for estimator in self.estimators_]):
raise AttributeError("The base estimator should "
"implement predict_proba method")
return self._predict_proba
def _predict_proba(self, X):
results = [estimator.predict_proba(X) for estimator in
self.estimators_]
return results
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples
y : array-like of shape (n_samples, n_outputs)
True values for X
Returns
-------
scores : float
accuracy_score of self.predict(X) versus y
"""
check_is_fitted(self)
n_outputs_ = len(self.estimators_)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi target classification but has only one")
if y.shape[1] != n_outputs_:
raise ValueError("The number of outputs of Y for fit {0} and"
" score {1} should be same".
format(n_outputs_, y.shape[1]))
y_pred = self.predict(X)
return np.mean(np.all(y == y_pred, axis=1))
def _more_tags(self):
# FIXME
return {'_skip_test': True}
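# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example showing that MultiOutputClassifier.predict_proba returns one
# probability array per output (a list of length n_outputs).  The generator
# call mirrors the docstring example above.
def _demo_multioutput_predict_proba():
    from sklearn.datasets import make_multilabel_classification
    from sklearn.linear_model import LogisticRegression
    X_demo, Y_demo = make_multilabel_classification(n_classes=3, random_state=0)
    clf = MultiOutputClassifier(LogisticRegression(max_iter=200)).fit(X_demo, Y_demo)
    probas = clf.predict_proba(X_demo[:2])
    # typically a list of 3 arrays, each of shape (2, 2) for binary outputs
    return [p.shape for p in probas]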
class _BaseChain(BaseEstimator, metaclass=ABCMeta):
@_deprecate_positional_args
def __init__(self, base_estimator, *, order=None, cv=None,
random_state=None):
self.base_estimator = base_estimator
self.order = order
self.cv = cv
self.random_state = random_state
@abstractmethod
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method of each step.
Returns
-------
self : object
"""
X, Y = self._validate_data(X, Y, multi_output=True, accept_sparse=True)
random_state = check_random_state(self.random_state)
check_array(X, accept_sparse=True)
self.order_ = self.order
if self.order_ is None:
self.order_ = np.array(range(Y.shape[1]))
elif isinstance(self.order_, str):
if self.order_ == 'random':
self.order_ = random_state.permutation(Y.shape[1])
elif sorted(self.order_) != list(range(Y.shape[1])):
raise ValueError("invalid order")
self.estimators_ = [clone(self.base_estimator)
for _ in range(Y.shape[1])]
if self.cv is None:
Y_pred_chain = Y[:, self.order_]
if sp.issparse(X):
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
X_aug = X_aug.tocsr()
else:
X_aug = np.hstack((X, Y_pred_chain))
elif sp.issparse(X):
Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1]))
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
else:
Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
X_aug = np.hstack((X, Y_pred_chain))
del Y_pred_chain
for chain_idx, estimator in enumerate(self.estimators_):
y = Y[:, self.order_[chain_idx]]
estimator.fit(X_aug[:, :(X.shape[1] + chain_idx)], y,
**fit_params)
if self.cv is not None and chain_idx < len(self.estimators_) - 1:
col_idx = X.shape[1] + chain_idx
cv_result = cross_val_predict(
self.base_estimator, X_aug[:, :col_idx],
y=y, cv=self.cv)
if sp.issparse(X_aug):
X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
else:
X_aug[:, col_idx] = cv_result
return self
def predict(self, X):
"""Predict on the data matrix X using the ClassifierChain model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
Y_pred : array-like of shape (n_samples, n_classes)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse=True)
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
if chain_idx == 0:
X_aug = X
else:
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_pred = Y_pred_chain[:, inv_order]
return Y_pred
class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain):
"""A multi-label model that arranges binary classifiers into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <classifierchain>`.
.. versionadded:: 0.19
Parameters
----------
base_estimator : estimator
The base estimator from which the classifier chain is built.
order : array-like of shape (n_outputs,) or 'random', default=None
If None, the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, default=None
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
Possible inputs for cv are:
- None, to use true labels when fitting,
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
classes_ : list
A list of arrays of length ``len(estimators_)`` containing the
class labels for each estimator in the chain.
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
Examples
--------
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.multioutput import ClassifierChain
>>> X, Y = make_multilabel_classification(
... n_samples=12, n_classes=3, random_state=0
... )
>>> X_train, X_test, Y_train, Y_test = train_test_split(
... X, Y, random_state=0
... )
>>> base_lr = LogisticRegression(solver='lbfgs', random_state=0)
>>> chain = ClassifierChain(base_lr, order='random', random_state=0)
>>> chain.fit(X_train, Y_train).predict(X_test)
array([[1., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
>>> chain.predict_proba(X_test)
array([[0.8387..., 0.9431..., 0.4576...],
[0.8878..., 0.3684..., 0.2640...],
[0.0321..., 0.9935..., 0.0625...]])
See Also
--------
RegressorChain: Equivalent for regression
MultiOutputClassifier: Classifies each output independently rather than
chaining.
References
----------
Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier
Chains for Multi-label Classification", 2009.
"""
def fit(self, X, Y):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
Returns
-------
self : object
"""
super().fit(X, Y)
self.classes_ = [estimator.classes_
for chain_idx, estimator
in enumerate(self.estimators_)]
return self
@if_delegate_has_method('base_estimator')
def predict_proba(self, X):
"""Predict probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Returns
-------
Y_prob : array-like of shape (n_samples, n_classes)
"""
X = check_array(X, accept_sparse=True)
Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1]
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_prob = Y_prob_chain[:, inv_order]
return Y_prob
@if_delegate_has_method('base_estimator')
def decision_function(self, X):
"""Evaluate the decision_function of the models in the chain.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
Y_decision : array-like of shape (n_samples, n_classes)
Returns the decision function of the sample for each model
in the chain.
"""
Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug)
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_decision = Y_decision_chain[:, inv_order]
return Y_decision
def _more_tags(self):
return {'_skip_test': True,
'multioutput_only': True}
class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain):
"""A multi-label model that arranges regressions into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <regressorchain>`.
.. versionadded:: 0.20
Parameters
----------
base_estimator : estimator
The base estimator from which the classifier chain is built.
order : array-like of shape (n_outputs,) or 'random', default=None
If None, the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, default=None
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
Possible inputs for cv are:
- None, to use true labels when fitting,
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
Examples
--------
>>> from sklearn.multioutput import RegressorChain
>>> from sklearn.linear_model import LogisticRegression
>>> logreg = LogisticRegression(solver='lbfgs',multi_class='multinomial')
>>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]]
>>> chain = RegressorChain(base_estimator=logreg, order=[0, 1]).fit(X, Y)
>>> chain.predict(X)
array([[0., 2.],
[1., 1.],
[2., 0.]])
See also
--------
ClassifierChain: Equivalent for classification
MultiOutputRegressor: Learns each output independently rather than
chaining.
"""
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_targets)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method at each step
of the regressor chain.
Returns
-------
self : object
"""
super().fit(X, Y, **fit_params)
return self
def _more_tags(self):
return {'multioutput_only': True}
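# A minimal usage sketch of the `order` and `cv` options documented above,
# using a true regressor as the base estimator (illustrative only; the exact
# outputs depend on the fitted models):
#
#     from sklearn.linear_model import LinearRegression
#     X = [[1, 0], [0, 1], [1, 1]]
#     Y = [[0, 2], [1, 1], [2, 0]]
#     # Predict target 0 first, then target 1, feeding 3-fold cross-validated
#     # predictions of earlier targets to later models in the chain.
#     chain = RegressorChain(base_estimator=LinearRegression(),
#                            order=[0, 1], cv=3).fit(X, Y)
#     chain.predict(X)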
| bsd-3-clause |
fredrikw/scipy | scipy/signal/windows.py | 11 | 53970 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import fftpack, linalg, special
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
if not sym and not odd:
w = w[:-1]
return w
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey,
and is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser window was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the window that
maximizes the energy in the main lobe relative to the total energy of
the window.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fftpack.fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fftpack.fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with `ifftshift`
and be multiplied by the result of an fft (SEE ALSO `fftfreq`).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
exponential (needs decay scale), tukey (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
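# The dispatch above accepts three forms for `window`:
#
#     get_window('hann', 64)            # plain string: parameter-free window
#     get_window(('kaiser', 8.6), 64)   # tuple: window name plus its parameters
#     get_window(4.0, 64)               # bare float: shorthand for ('kaiser', 4.0)
#
# whereas get_window('kaiser', 64) raises ValueError, because 'kaiser' needs
# its beta parameter and must therefore be passed as a tuple.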
| bsd-3-clause |
nodev-io/pytest-nodev | pytest_nodev/blacklists.py | 3 | 4406 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Alessandro Amici
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Regexes that blacklist problem modules and objects.
Potentially dangerous, crashing, hard-hanging or simply annoying objects
belonging to the standard library and to the pytest-nodev dependencies
are unconditionally blacklisted so that new users can test ``--candidates-from-stdlib``
without bothering with OS-level isolation.
"""
# python 2 support via python-future
from __future__ import unicode_literals
from builtins import open
MODULE_BLACKLIST = [
# underscore 'internal use' modules and objects
r'_|.*\._',
# crash
'icopen',
'ntpath',
'tests?',
r'.*\.tests?',
r'.*\.testing',
'xml.etree.ElementTree',
'pycallgraph',
'queue',
'idlelib',
# hangs
'itertools',
'bsddb',
# dangerous
'subprocess',
'smtpd',
# annoying
'antigravity', # not sure about this one :)
'this', # and about this one too!
'pydoc',
'tkinter',
'turtle',
'asyncio',
]
OBJECT_BLACKLIST = [
# underscore 'internal use' modules and objects
r'_|.*\._',
'.*:_',
# pytest internals
'_pytest.runner:exit',
'_pytest.runner:skip',
'_pytest.skipping:xfail',
'pytest_timeout:timeout_timer',
# unconditional exit
'faulthandler:_sigsegv',
'posix:abort',
'posix:_exit',
'posix:fork',
'posix:forkpty',
'pty:fork',
'_signal:default_int_handler',
'signal:default_int_handler',
'atexit.register',
# low level crashes
'numpy.fft.fftpack_lite:cffti',
'numpy.fft.fftpack_lite:rffti',
'appnope._nope:beginActivityWithOptions',
'ctypes:string_at',
'ctypes:wstring_at',
'gc:_dump_rpy_heap',
'gc:dump_rpy_heap',
'matplotlib._image:Image',
'getpass:getpass',
'getpass:unix_getpass',
'ensurepip:_run_pip',
'idlelib.rpc:SocketIO',
'numpy.core.multiarray_tests',
'.*base64.*code',
# uninterruptable hang
'compiler.ast:AugAssign',
'IPython.core.getipython:get_ipython',
'IPython.terminal.embed:InteractiveShellEmbed',
'IPython.terminal.interactiveshell:TerminalInteractiveShell',
'itertools:cycle',
'itertools:permutations',
'itertools:repeat',
'pydoc:apropos',
'logging.config:listen',
'multiprocessing.dummy.connection:Listener',
'multiprocessing.dummy.connection:Pipe',
# dangerous
'os.mkdir',
'os.command',
'pip.utils:rmtree',
'platform:popen',
'posix:popen',
'shutil.rmtree',
'turtle.write_docstringdict',
'multiprocessing.semaphore_tracker:main',
# annoying
'urllib.request:URLopener',
'urllib.request:FancyURLopener',
'urllib.request:urlopen',
'urllib.response:addbase',
'aifc.Error',
'aifc.Aifc_write',
'asyncore:file_dispatcher',
'asyncore:file_wrapper',
'sunau:open',
'sunau:Error',
'sunau:Au_write',
'tempfile:TemporaryFile',
'urllib.robotparser:RobotFileParser',
'wave:Wave_write',
'tempfile:mkdtemp',
'tempfile:mkstemp',
'tempfile:mktemp',
'multiprocessing.util',
]
# FIXME: this is a (hopefully!) temporary hack to permit adding to the object blacklist
try:
with open('object_blacklist.txt') as fp:
OBJECT_BLACKLIST += [line.rstrip('\n') for line in fp if line.strip()]
except IOError:
pass
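# A minimal sketch of how these patterns could be applied to candidate names
# (illustrative only; the helper name is hypothetical and the plugin's actual
# filtering code may differ):
#
#     import re
#
#     def _is_blacklisted(name, patterns=MODULE_BLACKLIST):
#         """Return True if `name` fully matches any blacklist regex."""
#         return any(re.match(pattern + r'\Z', name) for pattern in patterns)
#
#     # _is_blacklisted('subprocess') -> True
#     # _is_blacklisted('json')       -> False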
| mit |
cl4rke/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
arabenjamin/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
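# A small illustrative addition (not part of the original example): reuse the
# estimators fitted on the last (largest) index size above to show how the
# precision@10 of one extra query is computed. Query index 0 is an arbitrary
# choice made for this sketch.
extra_query = queries[0]
extra_approx = lshf.kneighbors(extra_query, return_distance=False)
extra_exact = nbrs.kneighbors(extra_query, return_distance=False)
print("precision@10 for one extra query: %0.2f"
% np.in1d(extra_approx, extra_exact).mean())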
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
arjoly/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
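# A small illustrative addition (not part of the original example; the step
# names come from the pipeline definition above): apply the fitted
# 'subjectbody' and 'union' steps to a few raw test posts to inspect the
# shape of the combined feature matrix.
subject_body = pipeline.named_steps['subjectbody'].transform(test.data[:5])
combined = pipeline.named_steps['union'].transform(subject_body)
print("Combined feature matrix for 5 test posts:", combined.shape)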
| bsd-3-clause |
huobaowangxi/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """These tests for LFW require medium-sized data downloading and processing.
If the data has not already been downloaded by running the examples,
the tests won't run (they are skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
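# (format: a count line, then 5 "same person" lines of the form
# "name\ti\tj", then 5 "different persons" lines of the form
# "name1\ti\tname2\tj")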
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
JT5D/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
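# Note: despite its name, n_features above counts the points drawn per
# cluster centre; the generated data X itself is two-dimensional.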
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
franzpl/sweep | log_sweep_kaiser_window_script4/log_sweep_kaiser_window_script4.py | 2 | 2273 | #!/usr/bin/env python3
"""The influence of windowing of sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_in (=0).
fstart = 1 Hz
fstop = 22050 Hz
FIR-Filter: Bandstop
Deconvolution: Windowed Excitation
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import ir_imitation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 1
fstop = 22050
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
awgn = -30
noise_system = measurement_chain.additive_noise(awgn)
# FIR-Filter-System
f_low = 5000
f_high = 6000
order = 2
bandstop_system = measurement_chain.bandstop(f_low, f_high, fs, order)
# Combinate system elements
system = measurement_chain.chained(bandstop_system, noise_system)
# Lists
beta = 7
fade_in = 0
fade_out_list = np.arange(0, 1001, 1)
t_noise = 0.004
# Spectrum of bandstop for reference
bandstop_f = calculation.butter_bandstop(f_low, f_high, fs, N * 2 + 1, order)
def get_results(fade_out):
excitation_windowed = excitation * windows.window_kaiser(N,
fade_in,
fade_out,
fs, beta)
excitation_windowed_zeropadded = generation.zero_padding(
excitation_windowed, pad, fs)
system_response = system(excitation_windowed_zeropadded)
ir = calculation.deconv_process(excitation_windowed_zeropadded,
system_response,
fs)
return ir
with open("log_sweep_kaiser_window_script4.txt", "w") as f:
for fade_out in fade_out_list:
ir = get_results(fade_out)
pnr = calculation.pnr_db(ir[0], ir[int(t_noise * fs):pad * fs])
spectrum_distance = calculation.vector_distance(
bandstop_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_out) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/test_nanops.py | 2 | 43102 | from functools import partial
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Series, isna
from pandas.core.arrays import DatetimeArray
import pandas.core.nanops as nanops
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame:
def setup_method(self, method):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*arr_shape)
self.arr_float1 = np.random.randn(*arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, arr_shape)
self.arr_bool = np.random.randint(0, 2, arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype("S")
self.arr_utf = np.abs(self.arr_float).astype("U")
self.arr_date = np.random.randint(0, 20000, arr_shape).astype("M8[ns]")
self.arr_tdelta = np.random.randint(0, 20000, arr_shape).astype("m8[ns]")
self.arr_nan = np.tile(np.nan, arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf])
self.arr_obj = np.vstack(
[
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
self.arr_complex.astype("O"),
self.arr_str.astype("O"),
self.arr_utf.astype("O"),
self.arr_date.astype("O"),
self.arr_tdelta.astype("O"),
]
)
with np.errstate(invalid="ignore"):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
def teardown_method(self, method):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, "asm8", res)
res = getattr(res, "values", res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view("i8")
return targ, res
try:
if (
axis != 0
and hasattr(targ, "shape")
and targ.ndim
and targ.shape != res.shape
):
res = np.split(res, [targ.shape[0]], axis=0)[0]
except (ValueError, IndexError):
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except AssertionError:
# handle timedelta dtypes
if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == "O":
if targ.dtype.kind != "O":
res = res.astype(targ.dtype)
else:
try:
res = res.astype("c16")
except RuntimeError:
res = res.astype("f8")
try:
targ = targ.astype("c16")
except RuntimeError:
targ = targ.astype("f8")
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == "O":
raise
tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype)
tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype)
def check_fun_data(
self,
testfunc,
targfunc,
testarval,
targarval,
targarnanval,
check_dtype=True,
empty_targfunc=None,
**kwargs
):
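# Compare testfunc against targfunc for every axis and both skipna settings,
# then recurse on a lower-dimensional slice of the inputs until they are 1D.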
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
try:
res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
except BaseException as exc:
exc.args += (
"axis: {axis} of {of}".format(axis=axis, of=testarval.ndim - 1),
"skipna: {skipna}".format(skipna=skipna),
"kwargs: {kwargs}".format(kwargs=kwargs),
)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(
testfunc,
targfunc,
testarval2,
targarval2,
targarnanval2,
check_dtype=check_dtype,
empty_targfunc=empty_targfunc,
**kwargs
)
def check_fun(
self,
testfunc,
targfunc,
testar,
targar=None,
targarnan=None,
empty_targfunc=None,
**kwargs
):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(
testfunc,
targfunc,
testarval,
targarval,
targarnanval,
empty_targfunc=empty_targfunc,
**kwargs
)
except BaseException as exc:
exc.args += (
"testar: {testar}".format(testar=testar),
"targar: {targar}".format(targar=targar),
"targarnan: {targarnan}".format(targarnan=targarnan),
)
raise
def check_funs(
self,
testfunc,
targfunc,
allow_complex=True,
allow_all_nan=True,
allow_str=True,
allow_date=True,
allow_tdelta=True,
allow_obj=True,
**kwargs
):
self.check_fun(testfunc, targfunc, "arr_float", **kwargs)
self.check_fun(testfunc, targfunc, "arr_float_nan", "arr_float", **kwargs)
self.check_fun(testfunc, targfunc, "arr_int", **kwargs)
self.check_fun(testfunc, targfunc, "arr_bool", **kwargs)
objs = [
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
]
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan", **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, "arr_complex", **kwargs)
self.check_fun(
testfunc, targfunc, "arr_complex_nan", "arr_complex", **kwargs
)
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan_nanj", **kwargs)
objs += [self.arr_complex.astype("O")]
if allow_str:
self.check_fun(testfunc, targfunc, "arr_str", **kwargs)
self.check_fun(testfunc, targfunc, "arr_utf", **kwargs)
objs += [self.arr_str.astype("O"), self.arr_utf.astype("O")]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, "arr_date", **kwargs)
objs += [self.arr_date.astype("O")]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, "arr_tdelta", **kwargs)
objs += [self.arr_tdelta.astype("O")]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == "convert":
targfunc = partial(
self._badobj_wrap, func=targfunc, allow_complex=allow_complex
)
self.check_fun(testfunc, targfunc, "arr_obj", **kwargs)
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == "O":
if allow_complex:
value = value.astype("c16")
else:
value = value.astype("f8")
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(
nanops.nanany,
np.any,
allow_all_nan=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
def test_nanall(self):
self.check_funs(
nanops.nanall,
np.all,
allow_all_nan=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
def test_nansum(self):
self.check_funs(
nanops.nansum,
np.sum,
allow_str=False,
allow_date=False,
allow_tdelta=True,
check_dtype=False,
empty_targfunc=np.nansum,
)
def test_nanmean(self):
self.check_funs(
nanops.nanmean,
np.mean,
allow_complex=False,
allow_obj=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
)
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation, mean could overflow for int dtypes; it
# is now consistent with numpy
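# (e.g. with a = 2 ** 55 and 500 elements, the naive int64 sum 500 * 2 ** 55
# is about 1.8e19, which exceeds the int64 range of roughly 9.2e18)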
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
assert result == a
assert result == np_result
assert result.dtype == np.float64
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, "float128"):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ["mean", "std", "var", "skew", "kurt"]
group_b = ["min", "max"]
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
assert result.dtype == np.float64
else:
assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self.check_funs(
nanops.nanmedian,
np.median,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj="convert",
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanvar(self, ddof):
self.check_funs(
nanops.nanvar,
np.var,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanstd(self, ddof):
self.check_funs(
nanops.nanstd,
np.std,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@td.skip_if_no_scipy
@pytest.mark.parametrize("ddof", range(3))
def test_nansem(self, ddof):
from scipy.stats import sem
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nansem,
sem,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
allow_obj="convert",
ddof=ddof,
)
def _minmax_wrap(self, value, axis=None, func=None):
# numpy warns if all nan
res = func(value, axis)
if res.dtype.kind == "m":
res = np.atleast_1d(res)
return res
def test_nanmin(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
def test_nanmax(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isna(nans)
if res.ndim:
res[nullnan] = -1
elif (
hasattr(nullnan, "all")
and nullnan.all()
or not hasattr(nullnan, "all")
and nullnan
):
res = -1
return res
def test_nanargmax(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(
nanops.nanargmax,
func,
allow_str=False,
allow_obj=False,
allow_date=True,
allow_tdelta=True,
)
def test_nanargmin(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmin)
self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype("f8")
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.0
return result
@td.skip_if_no_scipy
def test_nanskew(self):
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nanskew,
func,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
@td.skip_if_no_scipy
def test_nankurt(self):
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nankurt,
func,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
def test_nanprod(self):
self.check_funs(
nanops.nanprod,
np.prod,
allow_str=False,
allow_date=False,
allow_tdelta=False,
empty_targfunc=np.nanprod,
)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs)
res11 = checkfun(
self.arr_float_nan_2d,
self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs)
res24 = checkfun(
self.arr_float_nan_2d,
self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs
)
res25 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1,
**kwargs
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs)
res11 = checkfun(
self.arr_float_nan_1d,
self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs)
res24 = checkfun(
self.arr_float_nan_1d,
self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs
)
res25 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1,
**kwargs
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson")
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
@td.skip_if_no_scipy
def test_nancorr_kendall(self):
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall")
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
@td.skip_if_no_scipy
def test_nancorr_spearman(self):
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman")
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
except Exception as exc:
exc.args += ("ndim: {arr_float.ndim}".format(arr_float=arr_float),)
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, "ndim", True):
try:
res0 = func(value, *args, **kwargs)
if correct:
assert res0
else:
assert not res0
except BaseException as exc:
exc.args += ("dim: {}".format(getattr(value, "ndim", value)),)
raise
if not hasattr(value, "ndim"):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [
("arr_complex", False),
("arr_int", False),
("arr_bool", False),
("arr_str", False),
("arr_utf", False),
("arr_complex", False),
("arr_complex_nan", False),
("arr_nan_nanj", False),
("arr_nan_infj", True),
("arr_complex_nan_infj", True),
]
pairs_float = [
("arr_float", False),
("arr_nan", False),
("arr_float_nan", False),
("arr_nan_nan", False),
("arr_float_inf", True),
("arr_inf", True),
("arr_nan_inf", True),
("arr_float_nan_inf", True),
("arr_nan_nan_inf", True),
]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr,)
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype("f4"), correct)
self.check_bool(nanops._has_infs, val.astype("f2"), correct)
except BaseException as exc:
exc.args += (arr,)
raise
def test__isfinite(self):
pairs = [
("arr_complex", False),
("arr_int", False),
("arr_bool", False),
("arr_str", False),
("arr_utf", False),
("arr_complex", False),
("arr_complex_nan", True),
("arr_nan_nanj", True),
("arr_nan_infj", True),
("arr_complex_nan_infj", True),
]
pairs_float = [
("arr_float", False),
("arr_nan", True),
("arr_float_nan", True),
("arr_nan_nan", True),
("arr_float_inf", True),
("arr_inf", True),
("arr_nan_inf", True),
("arr_float_nan_inf", True),
("arr_nan_nan_inf", True),
]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr,)
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype("f4"), correct)
self.check_bool(func1, val.astype("f2"), correct)
except BaseException as exc:
exc.args += (arr,)
raise
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_complex.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_int.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_bool.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_str.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_utf.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_date.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, "test")
class TestEnsureNumeric:
def test_numeric_values(self):
# Test integer
assert nanops._ensure_numeric(1) == 1
# Test float
assert nanops._ensure_numeric(1.1) == 1.1
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(["1", "2", "3"], dtype=object)
assert np.allclose(nanops._ensure_numeric(s_values), values)
# Test non-convertible string ndarray
s_values = np.array(["foo", "bar", "baz"], dtype=object)
msg = r"could not convert string to float: '(foo|baz)'"
with pytest.raises(ValueError, match=msg):
nanops._ensure_numeric(s_values)
def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric("1"), 1.0)
assert np.allclose(nanops._ensure_numeric("1.1"), 1.1)
assert np.allclose(nanops._ensure_numeric("1+1j"), 1 + 1j)
def test_non_convertable_values(self):
msg = "Could not convert foo to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric("foo")
msg = "Could not convert {} to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric({})
msg = r"Could not convert \[\] to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric([])
class TestNanvarFixedValues:
# xref GH10242
def setup_method(self, method):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5, check_less_precise=2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan, check_less_precise=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(
actual_variance, np.array([self.variance, 1.0 / 12]), check_less_precise=2
)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
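# (variance of a uniform U(0, 1) sample: E[X**2] - E[X]**2 = 1/3 - 1/4 = 1/12)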
tm.assert_almost_equal(variance_1, var, check_less_precise=2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, check_less_precise=2)
# The overestimated variance.
tm.assert_almost_equal(
variance_2, (n - 1.0) / (n - 2.0) * var, check_less_precise=2
)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array(
[
[0.97303362, 0.21869576, 0.55560287],
[0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292],
]
)
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array(
[
[
[0.13762259, 0.05619224, 0.11568816],
[0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449],
],
[
[0.09519783, 0.16435395, 0.05082054],
[0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163],
],
]
)
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
assert np.isnan(var[3])
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
assert np.isnan(std[3])
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
assert result == 0.0
@property
def prng(self):
return np.random.RandomState(1234)
class TestNanskewFixedValues:
# xref GH 11974
def setup_method(self, method):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
assert skew == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(right_tailed) > 0
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
tm.assert_almost_equal(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
assert np.isnan(skew)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues:
# xref GH 11974
def setup_method(self, method):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
assert kurt == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(right_tailed) > 0
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
tm.assert_almost_equal(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
assert np.isnan(kurt)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
class TestDatetime64NaNOps:
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.xfail(reason="disabled")
# Enabling mean changes the behavior of DataFrame.mean
# See https://github.com/pandas-dev/pandas/issues/24752
def test_nanmean(self, tz):
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
expected = dti[1]
for obj in [dti, DatetimeArray(dti), Series(dti)]:
result = nanops.nanmean(obj)
assert result == expected
dti2 = dti.insert(1, pd.NaT)
for obj in [dti2, DatetimeArray(dti2), Series(dti2)]:
result = nanops.nanmean(obj)
assert result == expected
def test_use_bottleneck():
if nanops._BOTTLENECK_INSTALLED:
pd.set_option("use_bottleneck", True)
assert pd.get_option("use_bottleneck")
pd.set_option("use_bottleneck", False)
assert not pd.get_option("use_bottleneck")
pd.set_option("use_bottleneck", use_bn)
@pytest.mark.parametrize(
"numpy_op, expected",
[
(np.sum, 10),
(np.nansum, 10),
(np.mean, 2.5),
(np.nanmean, 2.5),
(np.median, 2.5),
(np.nanmedian, 2.5),
(np.min, 1),
(np.max, 4),
(np.nanmin, 1),
(np.nanmax, 4),
],
)
def test_numpy_ops(numpy_op, expected):
# GH8383
result = numpy_op(pd.Series([1, 2, 3, 4]))
assert result == expected
@pytest.mark.parametrize(
"operation",
[
nanops.nanany,
nanops.nanall,
nanops.nansum,
nanops.nanmean,
nanops.nanmedian,
nanops.nanstd,
nanops.nanvar,
nanops.nansem,
nanops.nanargmax,
nanops.nanargmin,
nanops.nanmax,
nanops.nanmin,
nanops.nanskew,
nanops.nankurt,
nanops.nanprod,
],
)
def test_nanops_independent_of_mask_param(operation):
# GH22764
s = pd.Series([1, 2, np.nan, 3, np.nan, 4])
mask = s.isna()
median_expected = operation(s)
median_result = operation(s, mask=mask)
assert median_expected == median_result
| apache-2.0 |
rhattersley/iris | lib/iris/tests/idiff.py | 10 | 3191 | #!/usr/bin/env python
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides "diff-like" comparison of images.
Currently relies on matplotlib for image processing so limited to PNG format.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import os.path
import shutil
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mimg
import matplotlib.widgets as mwidget
def diff_viewer(expected_fname, result_fname, diff_fname):
plt.figure(figsize=(16, 16))
plt.suptitle(os.path.basename(expected_fname))
ax = plt.subplot(221)
ax.imshow(mimg.imread(expected_fname))
ax = plt.subplot(222, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(result_fname))
ax = plt.subplot(223, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(diff_fname))
def accept(event):
# remove the expected result and move the most recent result into its place
print('ACCEPTED NEW FILE: %s' % (os.path.basename(expected_fname), ))
os.remove(expected_fname)
shutil.copy2(result_fname, expected_fname)
os.remove(diff_fname)
plt.close()
def reject(event):
print('REJECTED: %s' % (os.path.basename(expected_fname), ))
plt.close()
ax_accept = plt.axes([0.7, 0.05, 0.1, 0.075])
ax_reject = plt.axes([0.81, 0.05, 0.1, 0.075])
bnext = mwidget.Button(ax_accept, 'Accept change')
bnext.on_clicked(accept)
bprev = mwidget.Button(ax_reject, 'Reject')
bprev.on_clicked(reject)
plt.show()
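# Illustrative sketch (not part of the original script), assuming three PNG
# files already exist on disk: the viewer can be driven directly with an
# expected image, the freshly generated result, and the failure diff. The
# file names below are placeholders only.
def _example_single_diff():
    diff_viewer('expected.png',
                'result-expected.png',
                'result-expected-failed-diff.png')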
def step_over_diffs():
import iris.tests
image_dir = os.path.join(os.path.dirname(iris.tests.__file__),
'results', 'visual_tests')
diff_dir = os.path.join(os.path.dirname(iris.tests.__file__),
'result_image_comparison')
for expected_fname in sorted(os.listdir(image_dir)):
result_path = os.path.join(diff_dir, 'result-' + expected_fname)
diff_path = result_path[:-4] + '-failed-diff.png'
# if the test failed, there will be a diff file
if os.path.exists(diff_path):
expected_path = os.path.join(image_dir, expected_fname)
diff_viewer(expected_path, result_path, diff_path)
if __name__ == '__main__':
# Force iris.tests to use the ```tkagg``` backend by using the '-d'
# command-line argument as idiff is an interactive tool that requires a
# gui interface.
sys.argv.append('-d')
step_over_diffs()
| lgpl-3.0 |
rodluger/everest | everest/dvs.py | 1 | 9340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`dvs.py` - Data Validation Summary
------------------------------------------
Code for handling the "Data Validation Summary" plot.
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
import matplotlib.pyplot as pl
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
class Frame(object):
'''
A not-so-elegant object that adds an inset axis at a given
position within a matplotlib axes instance when called.
'''
def __init__(self, fig, ax, pos=[0, 0, 1, 1]):
'''
'''
self.fig = fig
self.ax = ax
self.pos = pos
def __call__(self, pos=None, on=True):
'''
'''
if pos is None:
pos = self.pos
res = []
for axis in np.atleast_1d(self.ax):
ax = self.fig.add_subplot(111, label=np.random.randn())
ax.set_axes_locator(InsetPosition(axis, pos))
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(5)
if not on:
ax.axis('off')
res.append(ax)
if len(res) == 1:
# This is a single axis
return res[0]
else:
# This is a list of axes
return res
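# Illustrative sketch (not part of the original module), showing the intended
# Frame workflow: wrap a parent axes, then call the Frame to obtain an inset
# axes placed at `pos`. The figure and variable names are assumptions.
def _example_frame_usage():
    fig, parent = pl.subplots()
    frame = Frame(fig, parent, pos=[0.1, 0.1, 0.8, 0.8])
    inset = frame()                # inset axes positioned inside `parent`
    inset.plot([0, 1], [0, 1])     # draw something in the inset
    return fig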
class DVS(object):
'''
The "Data Validation Summary" figure container.
:param int nchunks: The number of light curve segments. Default 2
:param int pld_order: The PLD order. Default 3
'''
def __init__(self, nchunks=2, pld_order=3):
'''
'''
if pld_order <= 3:
hght = 28
nrows = 160
else:
hght = 32
nrows = 174 + hght * (pld_order - 3)
self.fig = pl.figure(figsize=(8.5, 11))
self.fig.subplots_adjust(
left=0.025 * (11 / 8.5), right=1 - 0.025 * (11 / 8.5), top=0.975,
bottom=0.025)
def GetFrame(y, x, dx, dy):
return Frame(self.fig, pl.subplot2grid((nrows, 160), (y, x),
colspan=dx, rowspan=dy))
self.title_left = GetFrame(0, 6, 44, 10)
self.title_center = GetFrame(0, 50, 66, 10)
self.title_right = GetFrame(0, 116, 44, 10)
self.body_top_left = GetFrame(12, 6, 102, 26)
self.body_top_right = [GetFrame(12, 116, 21, 26),
GetFrame(12, 139, 21, 26),
GetFrame(12 + hght, 116, 21, 26),
GetFrame(12 + hght, 139, 21, 26)]
self.body_left = [GetFrame(12 + hght * n, 6, 102, 26)
for n in range(1, 2 + pld_order)]
if (nchunks == 2) or (nchunks > 3):
self.body_right = [Frame(self.fig,
[pl.subplot2grid((nrows, 160),
(12 + hght * n, 116),
colspan=44, rowspan=13),
pl.subplot2grid((nrows, 160),
(25 + hght * n, 116), colspan=44,
rowspan=13)])
for n in range(2, 2 + pld_order)]
elif nchunks == 3:
self.body_right = [Frame(self.fig,
[pl.subplot2grid((nrows, 160),
(12 + hght * n, 116),
colspan=44,
rowspan=9),
pl.subplot2grid((nrows, 160),
(21 + hght * n, 116),
colspan=44,
rowspan=8),
pl.subplot2grid((nrows, 160),
(29 + hght * n, 116),
colspan=44,
rowspan=9)])
for n in range(2, 2 + pld_order)]
else:
self.body_right = [GetFrame(12 + hght * n, 116, 44, 26)
for n in range(2, 2 + pld_order)]
self.footer_left = GetFrame(nrows - 6, 6, 44, 6)
self.footer_center = GetFrame(nrows - 6, 50, 66, 6)
self.footer_right = GetFrame(nrows - 6, 116, 44, 6)
for ax in self.fig.get_axes():
ax.axis('off')
self.tcount = 0
self.lcount = 0
self.rcount = 0
def title(self):
'''
Returns the axis instance where the title will be printed
'''
return self.title_left(on=False), self.title_center(on=False), \
self.title_right(on=False)
def footer(self):
'''
Returns the axis instance where the footer will be printed
'''
return self.footer_left(on=False), self.footer_center(on=False), \
self.footer_right(on=False)
def top_right(self):
'''
Returns the axis instance at the top right of the page,
where the postage stamp and aperture is displayed
'''
res = self.body_top_right[self.tcount]()
self.tcount += 1
return res
def top_left(self):
'''
Returns the axis instance at the top left of the page,
where the final de-trended light curve is displayed
'''
return self.body_top_left()
def left(self):
'''
Returns the current axis instance on the left side of
the page where each successive light curve is displayed
'''
res = self.body_left[self.lcount]()
self.lcount += 1
return res
def right(self):
'''
Returns the current axis instance on the right side of the
page, where cross-validation information is displayed
'''
res = self.body_right[self.rcount]()
self.rcount += 1
return res
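# Illustrative sketch (not part of the original module): build a DVS page and
# write into its title row. The target name used here is a placeholder.
def _example_dvs_page():
    dvs = DVS(nchunks=2, pld_order=3)
    left, center, right = dvs.title()
    center.annotate('EPIC 000000000 (placeholder)', xy=(0.5, 0.5),
                    ha='center', va='center', fontsize=12)
    return dvs.fig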
class CBV(object):
'''
'''
def __init__(self):
'''
'''
self.fig = pl.figure(figsize=(8.5, 11))
self.fig.subplots_adjust(
left=0.025 * (11 / 8.5), right=1 - 0.025 * (11 / 8.5),
top=0.975, bottom=0.025)
def GetFrame(y, x, dx, dy):
return Frame(self.fig, pl.subplot2grid((160, 160), (y, x),
colspan=dx, rowspan=dy))
self.title_left = GetFrame(0, 6, 44, 10)
self.title_center = GetFrame(0, 50, 66, 10)
self.title_right = GetFrame(0, 116, 44, 10)
self._body = [GetFrame(12, 6, 148, 42),
GetFrame(62, 6, 148, 42),
GetFrame(112, 6, 148, 42)]
for ax in self.fig.get_axes():
ax.axis('off')
self.bcount = 0
def title(self):
'''
Returns the axis instance where the title will be printed
'''
return self.title_left(on=False), self.title_center(on=False), \
self.title_right(on=False)
def body(self):
'''
Returns the axis instance where the light curves will be shown
'''
res = self._body[self.bcount]()
self.bcount += 1
return res
class OVERFIT(object):
'''
'''
def __init__(self):
'''
'''
self.fig = pl.figure(figsize=(8.5, 11))
self.fig.subplots_adjust(
left=0.025 * (11 / 8.5), right=1 - 0.025 * (11 / 8.5),
top=0.975, bottom=0.025, hspace=0.5, wspace=0.5)
def GetFrame(y, x, dx, dy):
return Frame(self.fig, pl.subplot2grid((160, 160), (y, x),
colspan=dx, rowspan=dy))
self.title_left = GetFrame(0, 6, 44, 10)
self.title_center = GetFrame(0, 50, 66, 10)
self.title_right = GetFrame(0, 116, 44, 10)
for ax in self.fig.get_axes():
ax.axis('off')
kw = dict(colspan=40, rowspan=10)
kwh = dict(colspan=10, rowspan=10)
self.axes1 = [pl.subplot2grid((70, 60), (5, 5), **kw),
pl.subplot2grid((70, 60), (26, 5), **kw),
pl.subplot2grid((70, 60), (47, 5), **kw)]
self.axes1h = [pl.subplot2grid((70, 60), (5, 45), **kwh),
pl.subplot2grid((70, 60), (26, 45), **kwh),
pl.subplot2grid((70, 60), (47, 45), **kwh)]
self.axes2 = [pl.subplot2grid((70, 60), (15, 5), **kw),
pl.subplot2grid((70, 60), (36, 5), **kw),
pl.subplot2grid((70, 60), (57, 5), **kw)]
self.axes2h = [pl.subplot2grid((70, 60), (15, 45), **kwh),
pl.subplot2grid((70, 60), (36, 45), **kwh),
pl.subplot2grid((70, 60), (57, 45), **kwh)]
for ax in [self.axes1, self.axes1h, self.axes2, self.axes2h]:
for axis in ax:
axis.tick_params(direction='in')
def title(self):
'''
Returns the axis instance where the title will be printed
'''
return self.title_left(on=False), self.title_center(on=False), \
self.title_right(on=False)
| mit |
ecervera/mindstorms-nb | nxt/functions.py | 1 | 7333 | import json
import shutil
from IPython.core.display import display, HTML
def configure(n):
config = {
'version' : 'nxt',
'number' : n
}
with open('../task/robot_config.json', 'w') as f:
json.dump(config, f)
shutil.copyfile('./functions.py', '../task/functions.py')
print("\x1b[32mConfiguració completa, podeu continuar.\x1b[0m")
display(HTML('<p>Ara ja podeu continuar, començant la primera tasca de programació: provareu el robot a vore si respon i es mou correctament.</p><h2><a href="../task/index.ipynb" target="_blank">>>> Prova de connexió</a></h2>'))
def next_notebook(nb):
if nb=='moviments':
display(HTML('<p>Ja podeu passar a la pàgina següent, on aprendreu a controlar els moviments del robot:</p><h2><a href="motors.ipynb" target="_blank">>>> Moviments del robot</a></h2>'))
elif nb=='quadrat':
display(HTML('<p>Ara ja podeu continuar, bona sort!</p><h2><a href="quadrat.ipynb" target="_blank">>>> Exercici de moviment</a></h2>'))
elif nb=='sensors':
display(HTML('<p>Fins ara heu aprés a controlar el moviment del robot, i també a programar bucles, no està gens malament!</p><p>Per a continuar, anem a vore els altres components del robot, els sensors, que ens permetran fer programes encara més sofisticats.</p><h2><a href="sensors.ipynb" target="_blank">>>> Sensors</a></h2>'))
elif nb=='touch':
display(HTML('<p>Ara ja podeu passar al primer exercici amb sensors:</p><h2><a href="touch.ipynb" target="_blank">>>> Tacte</a></h2>'))
elif nb=='navigation':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="navigation.ipynb" target="_blank">>>> Exercici de navegació</a></h2>'))
elif nb=='sound':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="sound.ipynb" target="_blank">>>> Sensor de so</a></h2>'))
elif nb=='light':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="light.ipynb" target="_blank">>>> Sensor de llum</a></h2>'))
elif nb=='ultrasonic':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="ultrasonic.ipynb" target="_blank">>>> Sensor ultrasònic</a></h2>'))
elif nb=='sumo':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="sumo.ipynb" target="_blank">>>> El Gran Repte</a></h2>'))
else:
pass
import nxt.bluesock
import nxt.motor
import math
import time
from bluetooth.btcommon import BluetoothError
def connect():
global brick
global mB; global mC
global s1; global s2; global s3; global s4
global tempo
global connected_robot
with open('robot_config.json', 'r') as f:
config = json.load(f)
n = config['number']
try:
address = {2: '00:16:53:0A:9B:72', \
3: '00:16:53:0A:9D:F2', \
4: '00:16:53:0A:5C:72',
5: '00:16:53:08:D5:59', \
6: '00:16:53:08:DE:51', \
7: '00:16:53:0A:5A:B4', \
8: '00:16:53:0A:9B:27', \
9: '00:16:53:0A:9E:2C', \
10: '00:16:53:17:92:8A', \
11: '00:16:53:17:94:E0', \
12: '00:16:53:1A:C6:BD'}
brick = nxt.bluesock.BlueSock(address[n]).connect()
mB = nxt.motor.Motor(brick, nxt.motor.PORT_B)
mC = nxt.motor.Motor(brick, nxt.motor.PORT_C)
s1 = nxt.sensor.Touch(brick, nxt.sensor.PORT_1)
s2 = nxt.sensor.Sound(brick, nxt.sensor.PORT_2)
s2.set_input_mode(0x08,0x80) # dB adjusted, percentage
s3 = nxt.sensor.Light(brick, nxt.sensor.PORT_3)
s3.set_illuminated(True)
s3.set_input_mode(0x05,0x80) # Light active, percentage
s4 = nxt.sensor.Ultrasonic(brick, nxt.sensor.PORT_4)
tempo = 0.5
connected_robot = n
print("\x1b[32mRobot %d connectat.\x1b[0m" % n)
except BluetoothError as e:
errno, errmsg = eval(e.args[0])
if errno==16:
print("\x1b[31mNo es pot connectar, hi ha un altre programa ocupant la connexió.\x1b[0m")
elif errno==13:
print("\x1b[31mNo es pot connectar, el dispositiu no està emparellat.\x1b[0m")
elif errno == 112:
print("\x1b[31mNo es troba el brick, assegurat que estiga encés.\x1b[0m")
else:
print("Error %d: %s" % (errno, errmsg))
except KeyError:
print("\x1b[31mNúmero de robot incorrecte.\x1b[0m")
def disconnect():
try:
brick.sock.close()
print("\x1b[32mRobot %d desconnectat.\x1b[0m" % connected_robot)
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def stop():
try:
mB.brake()
mC.brake()
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def forward(speed=100,speed_B=100,speed_C=100):
move(speed_B=min(abs(speed),abs(speed_B)),speed_C=min(abs(speed),abs(speed_C)))
def backward(speed=100,speed_B=100,speed_C=100):
move(speed_B=-min(abs(speed),abs(speed_B)),speed_C=-min(abs(speed),abs(speed_C)))
def left(speed=100):
move(speed_B=0,speed_C=abs(speed))
def left_sharp(speed=100):
move(speed_B=-abs(speed),speed_C=abs(speed))
def right(speed=100):
move(speed_B=abs(speed),speed_C=0)
def right_sharp(speed=100):
move(speed_B=abs(speed),speed_C=-abs(speed))
def move(speed_B=0,speed_C=0):
max_speed = 100
speed_B = int(speed_B)
speed_C = int(speed_C)
if speed_B > 100:
speed_B = 100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_B < -100:
speed_B = -100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_C > 100:
speed_C = 100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_C < -100:
speed_C = -100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
try:
mB.run(-int(speed_B*max_speed/100))
mC.run(int(speed_C*max_speed/100))
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def touch():
return s1.is_pressed()
def sound():
return s2.get_loudness()
def light():
return s3.get_lightness()
from nxt.telegram import InvalidOpcodeError, InvalidReplyError
def ultrasonic():
global s4
try:
return s4.get_distance()
except (InvalidOpcodeError, InvalidReplyError):
disconnect()
print("\x1b[33mError de connexió, reintentant...\x1b[0m")
time.sleep(1)
connect()  # connect() takes no arguments; the robot number is re-read from robot_config.json
return s4.get_distance()
def play_sound(s):
brick.play_sound_file(False, bytes((s+'.rso').encode('ascii')))
def say(s):
play_sound(s)
def play_tone(f,t):
try:
brick.play_tone_and_wait(f, int(t*1000*tempo))
time.sleep(0.01)
except:
pass
from IPython.display import clear_output
def read_and_print(sensor):
try:
while True:
clear_output(wait=True)
print(sensor())
except KeyboardInterrupt:
pass
def test_sensors():
try:
while True:
clear_output(wait=True)
print(" Touch: %d\n Light: %d\n Sound: %d\nUltrasonic: %d" % (touch(),light(),sound(), ultrasonic()))
except KeyboardInterrupt:
pass
import matplotlib.pyplot as plt
def plot(l):
plt.plot(l)
| mit |
LohithBlaze/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
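# Illustrative sketch (not part of the original module): a Tee duplicates every
# write to both targets, which is how the gallery code below echoes script
# output to the console while also capturing it in a StringIO buffer.
def _example_tee_usage():
    buf = StringIO()
    tee = Tee(sys.stdout, buf)
    tee.write('captured and echoed\n')
    tee.flush()
    return buf.getvalue()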
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
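# Illustrative sketch (not part of the original module): the parser takes the
# raw text of a Sphinx searchindex.js file. The tiny hand-written index below
# only mimics the relevant structure and is an assumption, not real output.
def _example_parse_searchindex():
    fake_index = ('Search.setIndex({objects:{"pkg.mod":{"func":[0]}},'
                  'filenames:["generated/pkg.mod"]})')
    filenames, objects = parse_sphinx_searchindex(fake_index)
    # filenames -> ['generated/pkg.mod']; objects -> {'pkg.mod': {'func': [0]}}
    return filenames, objects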
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
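# Illustrative sketch (not part of the original module): the extractor returns
# the full module docstring, its first paragraph, and the row where the
# docstring ends; 'plot_example.py' is a hypothetical gallery script path.
def _example_extract_docstring():
    docstring, first_par, end_row = extract_docstring('plot_example.py')
    # the gallery page will only show source from `end_row` onwards
    return first_par, end_row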
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
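# Illustrative sketch (not part of the original module): the generated RST for
# a single thumbnail. The directory, file name and snippet are placeholders.
def _example_thumbnail_div():
    rst = _thumbnail_div('cluster', 'cluster', 'plot_kmeans.py',
                         'K-means clustering demo')
    return rst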
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
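# Illustrative sketch (not part of the original module): the helper keeps the
# shortest dotted prefix from which the object can still be imported, so a
# private location typically collapses to the public package name (the exact
# result depends on the installed library version).
def _example_short_module_name():
    return get_short_module_name('numpy.core.fromnumeric', 'mean')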
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/scalar/timestamp/test_timestamp.py | 3 | 37196 | """ test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import unicodedata
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3, PY2
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr end
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
result_day = data.day_name(time_locale)
result_month = data.month_name(time_locale)
# Work around https://github.com/pandas-dev/pandas/issues/22342
# different normalizations
if not PY2:
expected_day = unicodedata.normalize("NFD", expected_day)
expected_month = unicodedata.normalize("NFD", expected_month)
result_day = unicodedata.normalize("NFD", result_day,)
result_month = unicodedata.normalize("NFD", result_month)
assert result_day == expected_day
assert result_month == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
def test_is_leap_year(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
def test_resolution(self):
# GH#21336, GH#21365
dt = Timestamp('2100-01-01 00:00:00')
assert dt.resolution == Timedelta(nanoseconds=1)
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('z', ['Z0', 'Z00'])
def test_constructor_invalid_Z0_isostring(self, z):
# GH 8910
with pytest.raises(ValueError):
Timestamp('2014-11-02 01:00{}'.format(z))
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
@pytest.mark.parametrize('tz', [None, pytz.timezone('US/Pacific')])
def test_disallow_setting_tz(self, tz):
# GH 3746
ts = Timestamp('2010')
with pytest.raises(AttributeError):
ts.tz = tz
@pytest.mark.parametrize('offset', ['+0300', '+0200'])
def test_construct_timestamp_near_dst(self, offset):
# GH 20854
expected = Timestamp('2016-10-30 03:00:00{}'.format(offset),
tz='Europe/Helsinki')
result = Timestamp(expected, tz='Europe/Helsinki')
assert result == expected
@pytest.mark.parametrize('arg', [
'2013/01/01 00:00:00+09:00', '2013-01-01 00:00:00+09:00'])
def test_construct_with_different_string_format(self, arg):
# GH 12064
result = Timestamp(arg)
expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
assert result == expected
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.round(Timestamp(x).value / 1e9)) ==
int(np.round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
@pytest.mark.parametrize('value, check_kwargs', [
[946688461000000000, {}],
[946688461000000000 / long(1000), dict(unit='us')],
[946688461000000000 / long(1000000), dict(unit='ms')],
[946688461000000000 / long(1000000000), dict(unit='s')],
[10957, dict(unit='D', h=0)],
pytest.param((946688461000000000 + 500000) / long(1000000000),
dict(unit='s', us=499, ns=964),
marks=pytest.mark.skipif(not PY3,
reason='using truediv, so these'
' are like floats')),
pytest.param((946688461000000000 + 500000000) / long(1000000000),
dict(unit='s', us=500000),
marks=pytest.mark.skipif(not PY3,
reason='using truediv, so these'
' are like floats')),
pytest.param((946688461000000000 + 500000) / long(1000000),
dict(unit='ms', us=500),
marks=pytest.mark.skipif(not PY3,
reason='using truediv, so these'
' are like floats')),
pytest.param((946688461000000000 + 500000) / long(1000000000),
dict(unit='s'),
marks=pytest.mark.skipif(PY3,
reason='get chopped in py2')),
pytest.param((946688461000000000 + 500000000) / long(1000000000),
dict(unit='s'),
marks=pytest.mark.skipif(PY3,
reason='get chopped in py2')),
pytest.param((946688461000000000 + 500000) / long(1000000),
dict(unit='ms'),
marks=pytest.mark.skipif(PY3,
reason='get chopped in py2')),
[(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)],
[(946688461000000000 + 500000000) / long(1000000),
dict(unit='ms', us=500000)],
[946688461000000000 / 1000.0 + 5, dict(unit='us', us=5)],
[946688461000000000 / 1000.0 + 5000, dict(unit='us', us=5000)],
[946688461000000000 / 1000000.0 + 0.5, dict(unit='ms', us=500)],
[946688461000000000 / 1000000.0 + 0.005, dict(unit='ms', us=5, ns=5)],
[946688461000000000 / 1000000000.0 + 0.5, dict(unit='s', us=500000)],
[10957 + 0.5, dict(unit='D', h=12)]])
def test_unit(self, value, check_kwargs):
def check(value, unit=None, h=1, s=1, us=0, ns=0):
stamp = Timestamp(value, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == ns
check(value, **check_kwargs)
def test_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
result = Timestamp(base.value + Timedelta('5ms').value)
assert result == Timestamp(str(base) + ".005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta('5us').value)
assert result == Timestamp(str(base) + ".000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta('5ns').value)
assert result == Timestamp(str(base) + ".000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta('6ms 5us').value)
assert result == Timestamp(str(base) + ".006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta('200ms 5us').value)
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
class TestTimestampNsOperations(object):
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
assert modified_value - value == expected_value
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
assert ts.value == expected_value - 9 * 3600 * 1000000000
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
assert ts.value == expected_value + 4 * 3600 * 1000000000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp('20130501T071545.123456789')
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1293840000000000010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate(object):
def test_compare_1700(self):
r = Timestamp('1700-06-23').to_julian_date()
assert r == 2342145.5
def test_compare_2000(self):
r = Timestamp('2000-04-12').to_julian_date()
assert r == 2451646.5
def test_compare_2100(self):
r = Timestamp('2100-08-12').to_julian_date()
assert r == 2488292.5
def test_compare_hour01(self):
r = Timestamp('2000-08-12T01:00:00').to_julian_date()
assert r == 2451768.5416666666666666
def test_compare_hour13(self):
r = Timestamp('2000-08-12T13:00:00').to_julian_date()
assert r == 2451769.0416666666666666
class TestTimestampConversion(object):
def test_conversion(self):
# GH#9255
ts = Timestamp('2000-01-01')
result = ts.to_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.to_datetime64()
expected = np.datetime64(ts.value, 'ns')
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_to_pydatetime_nonzero_nano(self):
ts = Timestamp('2011-01-01 9:00:00.123456789')
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.to_pydatetime()
assert result == expected
def test_timestamp_to_datetime(self):
stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_dateutil(self):
stamp = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D')
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_explicit_pytz(self):
stamp = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D')
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows_python_3
def test_timestamp_to_datetime_explicit_dateutil(self):
stamp = Timestamp('20090415', tz=gettz('US/Eastern'), freq='D')
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_to_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp(Timestamp.max.to_pydatetime()).value / 1000 ==
Timestamp.max.value / 1000)
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 ==
Timestamp.min.value / 1000)
def test_to_period_tz_warning(self):
# GH#21333 make sure a warning is issued when timezone
# info is lost
ts = Timestamp('2009-04-15 16:17:18', tz='US/Eastern')
with tm.assert_produces_warning(UserWarning):
# warning that timezone info will be lost
ts.to_period('D')
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/calibration/plot_compare_calibration.py | 82 | 5012 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
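
# Editorial sketch (not part of the original example): a minimal, hedged
# numerical check of the docstring's definition of calibration. Among the
# test samples that the already-fitted LogisticRegression scores near 0.8,
# roughly 80% should actually be positive. The (0.75, 0.85) window and the
# variable names below are arbitrary illustrative choices.
lr_probs = lr.predict_proba(X_test)[:, 1]
near_point_eight = (lr_probs > 0.75) & (lr_probs < 0.85)
if near_point_eight.any():
    print("Mean predicted probability in (0.75, 0.85): %.3f"
          % lr_probs[near_point_eight].mean())
    print("Empirical fraction of positives:            %.3f"
          % y_test[near_point_eight].mean())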
| bsd-3-clause |
EggInTheShell/TodoCounting | evaluate_fcn.py | 1 | 4907 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Do not use the pretrained VGG weights.
"""
from keras import backend as K
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Activation, Reshape, Flatten, Dropout, TimeDistributed, Input, merge, GaussianNoise, BatchNormalization
from keras.layers import LSTM
from keras.layers import Convolution2D, Deconvolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.objectives import categorical_crossentropy
from keras.regularizers import l2
from keras.models import model_from_yaml
from keras.utils import np_utils
from keras.initializations import normal, zero
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.applications.vgg16 import VGG16
import pickle
import time
import numpy as np
import pickle
import matplotlib.pyplot as plt
from PIL import Image
from data_utils import *
from settings import *
def batch_generator(datapath='traindata_split.dump', batchsize=128, step=128, new_shape=[64,64]):
with open(datapath, mode='rb') as f:
data = pickle.load(f)
image = data['image']
label = data['label']
numData = image.shape[0]
idx = 0
# print(depthcolor.shape)
# print(np.max(depth))
    # Renamed from `K` to avoid shadowing the Keras backend alias imported above.
    max_pixel = np.max(image)
    print(max_pixel)
while True:
if idx == 0:
perm1 = np.arange(batchsize * step)
np.random.shuffle(perm1)
x,y = np.random.randint(64, size=2)
image_crop = image[:,y:y+256,x:x+256].astype(np.float32)/255
label_crop = label[:,y:y+256,x:x+256].astype(np.float32)/255
batchx = image_crop[perm1[idx:idx + batchsize]]
batchx = np.transpose(batchx, [0, 3, 1, 2])
batchy = label_crop[perm1[idx:idx + batchsize]]
batchy = np.transpose(batchy, [0, 3, 1, 2])
# print(batchx1.shape)
# print(batchx2.shape)
# print(batchy.shape)
yield batchx, batchy
if idx + batchsize >= batchsize * step:
idx = 0
elif idx + batchsize >= image_crop.shape[0]:
idx = 0
else:
idx += batchsize
# parameters
threshold = 0.1
EPOCH = 10
BATCHSIZE = 8
NUM_DATA = 1747
size = [256,256]
num_batches = int(NUM_DATA / BATCHSIZE)
# load model
loadpath = DATA_DIR + 'weight/fc1e0'
f = open(loadpath+'.json', 'r')
json_string = f.read()
f.close()
train_model = model_from_json(json_string)
train_model.load_weights(loadpath+'_W.hdf5')
datapath = DATA_DIR + 'patches/traindata.pkl'
gen = batch_generator(datapath=datapath, batchsize=BATCHSIZE, step=num_batches, new_shape=size)
for epoch in range(EPOCH):
testdata = next(gen)
y = train_model.predict(testdata[0]) # [8, 5, 256, 256]
y = np.transpose(y, [0,2,3,1])
y = np.mean(y, axis=3)
print(y.shape)
y = np.minimum(1, y)
y = np.maximum(0, y)
image = testdata[0]
image = np.transpose(image, [0,2,3,1])
merge = image + y[:,:,:,np.newaxis]
merge = np.minimum(1, merge)
merge = np.maximum(0, merge)
image = (image*255).astype(np.uint8)
y = (y * 255).astype(np.uint8)
merge = (merge * 255).astype(np.uint8)
for i in range(BATCHSIZE):
plt.subplot(1,3,1)
plt.imshow(image[i])
plt.subplot(1,3,2)
plt.imshow(y[i])
plt.gray()
plt.subplot(1,3,3)
plt.imshow(merge[i])
plt.show()
# for i in range(8):
# plt.subplot(4,6, 3*i+1)
# plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off',
# labelleft='off')
# depthcolor = testdata[0][0][i]
# # print(depthcolor.shape)
# depthcolor = np.transpose(depthcolor, [1,2,0])
# # print(depthcolor.shape, depthcolor.dtype)
# plt.imshow(depthcolor)
# plt.subplot(4,6,3*i+2)
# plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off',
# labelleft='off')
# predict = y3[i]
# # print(predict.shape, predict.dtype)
# plt.imshow(predict)
# plt.subplot(4,6,3*i+3)
# plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off',
# labelleft='off')
# groundtruth = testdata[1][i]
# label = np.zeros(size, dtype=np.uint8)
# for k in range(21):
# label[groundtruth[k] == 1] = k + 1
# # groundtruth = np.transpose(groundtruth, [1,2,0])
# # groundtruth = np.sum(groundtruth, axis=-1)
#
# # print(groundtruth.shape, groundtruth.dtype)
#
# plt.imshow(label)
#
# num = number_padded = '{0:04d}'.format(epoch)
# savepath = 'result/' + num + 'rand_f6e32.png'
# plt.savefig(savepath, dpi=300)
# # plt.show()
# plt.cla
| mit |
maheshakya/scikit-learn | sklearn/feature_extraction/text.py | 2 | 48447 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg="%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
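
# Editorial note: an uncalled, hedged sketch (not part of the original module)
# of the tokenization pipeline documented in VectorizerMixin; the helper name
# `_sketch_build_analyzer` is hypothetical and it relies on CountVectorizer,
# which is defined further down in this module.
def _sketch_build_analyzer():
    """Show the n-grams produced by build_analyzer() for the 'word' and
    'char_wb' analyzers on a toy sentence."""
    word_analyzer = CountVectorizer(analyzer='word',
                                    ngram_range=(1, 2)).build_analyzer()
    # Lowercased unigrams first, then bigrams joined by single spaces.
    print(word_analyzer("Bi-grams are useful"))
    char_analyzer = CountVectorizer(analyzer='char_wb',
                                    ngram_range=(3, 3)).build_analyzer()
    # Character 3-grams computed inside word boundaries, padded with spaces.
    print(char_analyzer("hi there"))
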
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
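
# Editorial note: an uncalled, hedged usage sketch of HashingVectorizer (not
# part of the original module); `_sketch_hashing_vectorizer` and the toy
# corpus are hypothetical and purely illustrative.
def _sketch_hashing_vectorizer():
    """Stateless hashing: no vocabulary is stored, only a fixed-width matrix."""
    corpus = ["the quick brown fox", "the lazy dog", "quick quick fox"]
    hasher = HashingVectorizer(n_features=2 ** 8, norm=None, non_negative=True)
    X = hasher.transform(corpus)   # no fit required: the transformer is stateless
    print(X.shape)                 # (3, 256) regardless of the corpus vocabulary
    # With norm=None the row sums approximate per-document token counts
    # (signed hash collisions can occasionally cancel each other out).
    print(X.sum(axis=1))
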
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
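
# Editorial note: an uncalled, hedged sketch (not part of the original module)
# of what _document_frequency computes; `_sketch_document_frequency` is a
# hypothetical name and the tiny matrix below is made up for illustration.
def _sketch_document_frequency():
    """Each entry counts how many documents (rows) a feature occurs in."""
    X = sp.csr_matrix(np.array([[1, 0, 2],
                                [0, 0, 1],
                                [3, 0, 0]]))
    print(_document_frequency(X))  # -> [2 0 2]
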
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
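
# Editorial note: an uncalled, hedged usage sketch of CountVectorizer (not
# part of the original module); `_sketch_count_vectorizer` and the corpus are
# hypothetical and only illustrate the fit/transform round trip.
def _sketch_count_vectorizer():
    """Learn a vocabulary, build the sparse count matrix, and map it back."""
    corpus = ["the cat sat", "the cat sat on the mat", "dogs and cats"]
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)       # sparse matrix of token counts
    print(sorted(six.iteritems(vectorizer.vocabulary_)))  # term -> column index
    print(vectorizer.get_feature_names())      # columns, sorted alphabetically
    print(vectorizer.inverse_transform(X[0]))  # terms present in the first doc
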
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# adding 1 to the log (log(n_samples / df) + 1) makes sure terms with
# zero idf don't get suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
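# --- Hedged usage sketch (not part of the original scikit-learn source) ---
# Recomputes the smoothed idf weights exactly as TfidfTransformer.fit does
# above: df and n_samples are both incremented by one, then
# idf = log(n_samples / df) + 1. The count matrix below is made up.
def _demo_smoothed_idf():
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0],
                       [3, 2, 0],
                       [3, 0, 2]], dtype=np.float64)
    n_samples = counts.shape[0] + 1        # smooth_idf adds one "extra" doc
    df = (counts > 0).sum(axis=0) + 1      # ...containing every term once
    idf = np.log(float(n_samples) / df) + 1.0
    # Should match: TfidfTransformer(smooth_idf=True).fit(counts).idf_
    return idf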
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can be
of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
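# --- Hedged usage sketch (not part of the original scikit-learn source) ---
# Minimal end-to-end use of TfidfVectorizer defined above: fit_transform
# learns the vocabulary and idf weights, transform reuses them on new text.
# The toy corpus is invented for illustration only.
def _demo_tfidf_vectorizer():
    corpus = ["the cat sat on the mat",
              "the dog sat on the log",
              "cats and dogs and cats"]
    vectorizer = TfidfVectorizer(norm='l2', use_idf=True, smooth_idf=True)
    X = vectorizer.fit_transform(corpus)               # sparse [3, n_terms]
    X_new = vectorizer.transform(["the cat and the dog"])
    return vectorizer.get_feature_names(), X.shape, X_new.shape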
| bsd-3-clause |
QuantSoftware/QuantSoftwareToolkit | QSTK/qstklearn/mldiagnostics.py | 7 | 2315 | # (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#
# Created on Month day, Year
#
# @author: Vishal Shekhar
# @contact: [email protected]
# @summary: ML Algo Diagnostic Utility (plots performance of the Algo on Train Vs CV sets)
#
import copy
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
class MLDiagnostics:
"""
This class can be used to produce learning curves.
These are plots of the evolution of Training Error and Cross-Validation Error across lambda (in general, a control parameter for model complexity).
This plot can help diagnose whether the ML model has a high-bias or a high-variance problem and can
thus help decide the next course of action.
In general, ML Algorithm is of the form,
Y=f(t,X) + lambdaVal*|t|
where Y is the output, t is the model parameter vector, lambdaVal is the regularization parameter.
|t| is the size of model parameter vector.
"""
def __init__(self,learner,Xtrain,Ytrain,Xcv,Ycv,lambdaArray):
self.learner = learner
self.Xtrain = Xtrain
self.Ytrain = Ytrain
self.Xcv = Xcv
self.Ycv = Ycv
self.lambdaArray = lambdaArray
self.ErrTrain = np.zeros((len(lambdaArray),1))
self.ErrCV = copy.copy(self.ErrTrain)
def avgsqerror(self,Y,Ypred):
return np.sum((Y-Ypred)**2)/len(Y)
def plotCurves(self,filename):
Xrange = self.lambdaArray  # errors are computed per regularization value in runDiagnostics
plt.plot(Xrange,self.ErrTrain,label = "Train Error")
plt.plot(Xrange,self.ErrCV,label="CV Error")
plt.legend(loc='best')
plt.title('Learning Curves')
plt.xlabel('Regularization parameter (lambda)')
plt.ylabel('Average Error')
plt.draw()
savefig(filename,format='pdf')
def runDiagnostics(self,filename):
for i,lambdaVal in zip(range(len(self.lambdaArray)),self.lambdaArray):
learner = copy.copy(self.learner())# is deep copy required
# setLambda needs to be a supported function for all ML strategies.
learner.setLambda(lambdaVal)
learner.addEvidence(self.Xtrain,self.Ytrain)
YtrPred = learner.query(self.Xtrain)
self.ErrTrain[i] = self.avgsqerror(self.Ytrain,YtrPred)
YcvPred = learner.query(self.Xcv)
self.ErrCV[i] = self.avgsqerror(self.Ycv,YcvPred)
self.plotCurves(filename)
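# --- Hedged usage sketch (not part of the original QSTK source) ---
# Shows how MLDiagnostics is meant to be driven. The learner class passed in
# must provide setLambda, addEvidence and query (as noted in runDiagnostics);
# "SomeRegressionLearner" and the random data are placeholders, not QSTK APIs.
def _demo_mldiagnostics(SomeRegressionLearner):
    Xtrain = np.random.rand(100, 3)
    Ytrain = np.random.rand(100)
    Xcv = np.random.rand(40, 3)
    Ycv = np.random.rand(40)
    lambda_array = [0.01, 0.1, 1.0, 10.0]
    diag = MLDiagnostics(SomeRegressionLearner, Xtrain, Ytrain, Xcv, Ycv,
                         lambda_array)
    diag.runDiagnostics("learning_curves.pdf")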
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tools/tile.py | 7 | 10290 | """
Quantilization functions and related stuff
"""
from pandas.types.missing import isnull
from pandas.types.common import (is_float, is_integer,
is_scalar)
from pandas.core.api import Series
from pandas.core.categorical import Categorical
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas.compat import zip
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int or sequence of scalars
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int
The precision at which to store and display the bins labels
include_lowest : bool
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
([(0.191, 3.367], (0.191, 3.367], (0.191, 3.367], (3.367, 6.533],
(6.533, 9.7], (0.191, 3.367]]
Categories (3, object): [(0.191, 3.367] < (3.367, 6.533] < (6.533, 9.7]],
array([ 0.1905 , 3.36666667, 6.53333333, 9.7 ]))
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3,
labels=["good","medium","bad"])
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1], dtype=int64)
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
# handle empty arrays. Can't determine range, so use 0-1.
# rng = (0, 1)
else:
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn)
mx += .001 * abs(mx)
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
else:
bins = np.asarray(bins)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
return _bins_to_cuts(x, bins, right=right, labels=labels,
retbins=retbins, precision=precision,
include_lowest=include_lowest)
def qcut(x, q, labels=None, retbins=False, precision=3):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int
The precision at which to store and display the bins labels
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
[[0, 1], [0, 1], (1, 2], (2, 3], (3, 4]]
Categories (4, object): [[0, 1] < (1, 2] < (2, 3] < (3, 4]]
>>> pd.qcut(range(5), 3, labels=["good","medium","bad"])
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3], dtype=int64)
"""
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
return _bins_to_cuts(x, bins, labels=labels, retbins=retbins,
precision=precision, include_lowest=True)
def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,
precision=3, name=None, include_lowest=False):
x_is_series = isinstance(x, Series)
series_index = None
if x_is_series:
series_index = x.index
if name is None:
name = x.name
x = np.asarray(x)
side = 'left' if right else 'right'
ids = bins.searchsorted(x, side=side)
if len(algos.unique(bins)) < len(bins):
raise ValueError('Bin edges must be unique: %s' % repr(bins))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isnull(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
increases = 0
while True:
try:
levels = _format_levels(bins, precision, right=right,
include_lowest=include_lowest)
except ValueError:
increases += 1
precision += 1
if increases >= 20:
raise
else:
break
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
levels = labels
levels = np.asarray(levels, dtype=object)
np.putmask(ids, na_mask, 0)
fac = Categorical(ids - 1, levels, ordered=True, fastpath=True)
else:
fac = ids - 1
if has_nas:
fac = fac.astype(np.float64)
np.putmask(fac, na_mask, np.nan)
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
return fac, bins
def _format_levels(bins, prec, right=True,
include_lowest=False):
fmt = lambda v: _format_label(v, precision=prec)
if right:
levels = []
for a, b in zip(bins, bins[1:]):
fa, fb = fmt(a), fmt(b)
if a != b and fa == fb:
raise ValueError('precision too low')
formatted = '(%s, %s]' % (fa, fb)
levels.append(formatted)
if include_lowest:
levels[0] = '[' + levels[0][1:]
else:
levels = ['[%s, %s)' % (fmt(a), fmt(b))
for a, b in zip(bins, bins[1:])]
return levels
def _format_label(x, precision=3):
fmt_str = '%%.%dg' % precision
if np.isinf(x):
return str(x)
elif is_float(x):
frac, whole = np.modf(x)
sgn = '-' if x < 0 else ''
whole = abs(whole)
if frac != 0.0:
val = fmt_str % frac
# rounded up or down
if '.' not in val:
if x < 0:
return '%d' % (-whole - 1)
else:
return '%d' % (whole + 1)
if 'e' in val:
return _trim_zeros(fmt_str % x)
else:
val = _trim_zeros(val)
if '.' in val:
return sgn + '.'.join(('%d' % whole, val.split('.')[1]))
else: # pragma: no cover
return sgn + '.'.join(('%d' % whole, val))
else:
return sgn + '%0.f' % whole
else:
return str(x)
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
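# --- Hedged usage sketch (not part of the original pandas source) ---
# Contrasts cut (equal-width bins over the value range, as built above) with
# qcut (equal-frequency bins from sample quantiles). The values are made up.
def _demo_cut_vs_qcut():
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 100.0])
    equal_width = cut(values, 3, labels=False)     # most values land in bin 0
    equal_freq = qcut(values, 3, labels=False)     # two values per bucket
    return equal_width, equal_freq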
| apache-2.0 |
whitews/dpconverge | test_dp_3params.py | 1 | 1245 | from dpconverge.data_set import DataSet
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
n_features = 3
points_per_feature = 100
centers = [[2, 2, 1], [2, 4, 2], [4, 2, 3], [4, 4, 4]]
ds = DataSet(parameter_count=n_features)
rnd_state = np.random.RandomState()
rnd_state.seed(3)
for i, center in enumerate(centers):
X, y = make_blobs(
n_samples=points_per_feature,
n_features=n_features,
centers=center,
cluster_std=0.2,
random_state=rnd_state.randint(128)
)
ds.add_blob(i, X)
component_count = 6
ds.plot_blobs(ds.classifications, x_lim=[0, 6], y_lim=[0, 6])
ds.plot_blobs(ds.classifications, x=0, y=2, x_lim=[0, 6], y_lim=[0, 6])
ds.cluster(
component_count=component_count,
burn_in=100,
iteration_count=400,
random_seed=1
)
valid_components = ds.get_valid_components()
print "Recommended component count: ", len(valid_components)
for i in range(component_count):
if i in valid_components:
ds.plot_iteration_traces(i)
# for i in range(component_count):
# if i not in valid_components:
# print "Possible invalid Component"
# ds.plot_iteration_traces(i)
ds.plot_animated_trace(x_lim=[0, 6], y_lim=[0, 6])
| bsd-3-clause |
abimannans/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
TaxIPP-Life/til-france | setup.py | 1 | 2778 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# TaxIPP-Life BaseModel (TIL-BaseModel) -- A microsimulation model over the life-cycle
# By: TaxIPP-Life (TIL) Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 TaxIPP-Life (TIL) Team
# (https://github.com/TaxIPP-Life/Til-BaseModel)
#
# This file is part of TaxIPP-Life (TIL).
#
# TaxIPP-Life (TIL) is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# TaxIPP-Life (TIL) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""TaxIPP-Life (TIL) is microsimulation model over the life-cycle
TaxIPP-Life (TIL) is based on OpenFisca (www.openfisca.fr) and liam2 (http://liam2.plan.be)
"""
from setuptools import setup, find_packages
classifiers = """\
Development Status :: 2 - Pre-Alpha
License :: OSI Approved :: GNU Affero General Public License v3
Operating System :: POSIX
Programming Language :: Python
Topic :: Scientific/Engineering :: Information Analysis
"""
doc_lines = __doc__.split('\n')
setup(
name = 'Til-France',
version = '0.1.1',
author = 'TaxIPP-Life (TIL) Team',
author_email = '[email protected]',
classifiers = [classifier for classifier in classifiers.split('\n') if classifier],
description = doc_lines[0],
keywords = 'benefit microsimulation social tax life-cycle',
license = 'http://www.fsf.org/licensing/licenses/agpl-3.0.html',
long_description = '\n'.join(doc_lines[2:]),
url = 'https://github.com/TaxIPP-Life/til-france',
entry_points = {
'console_scripts': [
'build_parameters=til_france.scripts.build_parameters:main',
'til_init=til_france.scripts.til_init:main',
]
},
extras_require = dict(
test = [
'ipp-macro-series-parser',
'matplotlib',
'nose',
'pandas >= 0.17',
'patsy',
'scipy >= 0.17',
'seaborn',
'statsmodels',
'webcolors',
'xlrd',
],
),
install_requires = [
'liam2',
'numpy',
'openfisca-survey-manager >= 0.9.8',
'PyYAML >= 3.10',
'til-core',
],
packages = find_packages(),
test_suite = 'nose.collector',
)
| gpl-3.0 |
AlirezaShahabi/zipline | tests/utils/test_factory.py | 34 | 2175 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import pandas as pd
import pytz
import numpy as np
from zipline.utils.factory import (load_from_yahoo,
load_bars_from_yahoo)
class TestFactory(TestCase):
def test_load_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=stocks, start=start, end=end)
assert data.index[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.index[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.columns
np.testing.assert_raises(
AssertionError, load_from_yahoo, stocks=stocks,
start=end, end=start
)
def test_load_bars_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)
assert data.major_axis[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.major_axis[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.items
for ohlc in ['open', 'high', 'low', 'close', 'volume', 'price']:
assert ohlc in data.minor_axis
np.testing.assert_raises(
AssertionError, load_bars_from_yahoo, stocks=stocks,
start=end, end=start
)
| apache-2.0 |
low-sky/cohrscld | sfe_ff.py | 1 | 2961 | import numpy as np
from astropy.table import Table
import scipy.stats as ss
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.constants as con
cohrs_dir = '/mnt/ephem/ephem/erosolow/cohrs/'
t = Table.read(cohrs_dir + 'cohrs_withir_withsfr.fits')
t = t[t['n_pixel']>1e4]
t = t[t['mlum_msun']>1e3]
rho = t['mlum_msun'].data * u.M_sun / (4 * np.pi *
(t['radius_pc'].data * u.pc)**3 / 3)
alpha = ((5 * t['sigv_kms']**2 * (u.km / u.s)**2
* t['radius_pc'].data * u.pc) *
(con.G * t['mlum_msun'].data * u.M_sun)**(-1)).to(
u.dimensionless_unscaled)
Mach = t['sigv_kms'] / 0.2
tff = (((3 * np.pi / (32 * con.G * rho))**0.5).to(u.Myr)).value
area = t['area_exact_as'] / 206265**2 * t['bgps_distance_pc']**2
# area = np.pi * (t['radius_pc'].data * u.pc)**2
sfe = 1e6 * (t['sfr_70um']/ area)
x = 0.014 * (alpha / 1.3)**(-0.68) * (Mach / 100)**(-0.32) *\
t['mlum_msun'].data / tff / area#.value
#x = (0.014 * t['mlum_msun'].data / tff / area)#.value
x = (0.026 * (alpha / 1.3)**(-0.3) * (Mach / 100)**(0.8) *
t['mlum_msun'].data / tff / area)
x = x#.value
y = sfe
y = y#.value
fig, (ax1) = plt.subplots(1)
fig.set_size_inches(5, 4)
idx = t['mlum_msun'].data > 1e2
xmin = -5
xmax = 1
ymin = -5
ymax = 1
val, edges, _ = ss.binned_statistic(np.log10(x[idx]),
np.log10(y[idx]),
statistic=np.nanmedian, bins=10)
histdata, xedge, yedge = np.histogram2d(np.log10(x[idx]),
np.log10(y[idx]),
range=[[xmin, xmax], [ymin, ymax]],
bins=40)
ax1.scatter(x[idx], y[idx], edgecolor='k',
facecolor='none', zorder=-99)
#ax1.plot(1e1**(0.5 * (edges[1:] + edges[0:-1])), 1e1**val, color='green', lw=3)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlabel(r'$\mathrm{SFR_{ff}} \Sigma_{\mathrm{CO}} ' +
r't_{\mathrm{ff}}^{-1}\ (M_{\odot}\ ' +
r'\mathrm{Myr}^{-1}\ \mathrm{pc}^{-2})$', size=16)
ax1.set_ylabel(r'$\dot{\Sigma}_{\star} (M_{\odot}\ ' +
r'\mathrm{Myr}^{-1}\ \mathrm{pc}^{-2})$',
size=16)
# ax1.set_xlim(1e1**])
ax1.set_ylim([1e1**ymin, 1e1**ymax])
ax1.set_xlim([1e1**xmin, 1e1**xmax])
histdata[histdata < 3] = np.nan
ax1.grid()
print(xmin, xmax)
cax = ax1.pcolormesh(1e1**xedge, 1e1**yedge,
np.ma.fix_invalid(histdata.T), vmin=1, vmax=np.nanmax(histdata))
# cax = ax1.imshow(histdata.T, extent=[1e1**xmin, 1e1**xmax,
# 1e1**ymin, 1e1**ymax], origin='lower',
# interpolation='nearest', cmap='inferno', vmin=2, aspect='auto')
ax1.plot([1e1**xmin, 1e1**xmax],[1e1**ymin, 1e1**ymax], alpha=0.5, lw=3)
cb = fig.colorbar(cax)
cb.set_label(r'Number')
fig.tight_layout()
plt.savefig('sfe_model.png', dpi=300)
plt.close(fig)
plt.clf()
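# --- Hedged sanity-check sketch (not part of the original script) ---
# Recomputes the free-fall time used above, t_ff = sqrt(3*pi / (32*G*rho)),
# for a single cloud; the mass and radius values are invented for illustration.
def _demo_tff():
    mass = 1e4 * u.M_sun
    radius = 10.0 * u.pc
    rho_demo = mass / (4 * np.pi * radius**3 / 3)
    return ((3 * np.pi / (32 * con.G * rho_demo))**0.5).to(u.Myr)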
| gpl-3.0 |
toobaz/pandas | pandas/tests/test_lib.py | 2 | 7547 | import numpy as np
import pytest
from pandas._libs import lib, writers as libwriters
from pandas import Index
import pandas.util.testing as tm
class TestMisc:
def test_max_len_string_array(self):
arr = a = np.array(["foo", "b", np.nan], dtype="object")
assert libwriters.max_len_string_array(arr) == 3
# unicode
arr = a.astype("U").astype(object)
assert libwriters.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype("S").astype(object)
assert libwriters.max_len_string_array(arr) == 3
# raises
with pytest.raises(TypeError):
libwriters.max_len_string_array(arr.astype("U"))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [["p", "a"], ["n", "d"], ["a", "s"]]
gen = (key for key in keys)
expected = np.array(["a", "d", "n", "p", "s"])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(["p", "a", "n", "d", "s"])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
class TestIndexing:
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_cache_readonly_preserve_docstrings():
# GH18197
assert Index.hasnans.__doc__ is not None
| bsd-3-clause |
camallen/aggregation | experimental/condor/retireBlank.py | 2 | 2893 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import cPickle as pickle
sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc
client = pymongo.MongoClient()
db = client['condor_2014-09-14']
collection = db["condor_classifications"]
collection2 = db["condor_subjects"]
subjects = []
users = []
classifications = []
with open("/home/greg/Databases/condor_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \"/home/greg/Databases/condor_ibcc.csv\"\n")
f.write("outputFile = \"/home/greg/Databases/condor_ibcc.out\"\n")
f.write("confMatFile = \"/home/greg/Databases/condor_ibcc.mat\"\n")
f.write("nu0 = np.array([30,70])\n")
f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
import datetime
i = 0
errorCount = 0
for r in collection.find({"$and": [{"tutorial": False},{"subjects" : {"$elemMatch" : {"zooniverse_id" : {"$exists" : True}}}}]}):
try:
user_name = r["user_name"]
except KeyError:
continue
subject_id = r["subjects"][0]["zooniverse_id"]
if not(user_name in users):
users.append(user_name)
if not(subject_id in subjects):
subjects.append(subject_id)
user_index = users.index(user_name)
subject_index = subjects.index(subject_id)
if ("marks" in r["annotations"][-1]):
blank = 1
for markings in r["annotations"][-1]["marks"].values():
try:
if markings["animal"] in ["condor","raven","goldenEagle","coyote","turkeyVulture"]:
blank = 0
break
elif markings["animal"] in ["carcassOrScale"]:
continue
else:
errorCount += 1
except KeyError:
errorCount += 1
else:
blank = 1
i += 1
#if i == 1000:
# break
if (i % 5000) == 0:
print i
classifications.append((user_index,subject_index,blank))
print "====----"
print errorCount
try:
os.remove("/home/greg/Databases/condor_ibcc.out")
except OSError:
pass
try:
os.remove("/home/greg/Databases/condor_ibcc.mat")
except OSError:
pass
try:
os.remove("/home/greg/Databases/condor_ibcc.csv.dat")
except OSError:
pass
with open("/home/greg/Databases/condor_ibcc.csv","wb") as f:
f.write("a,b,c\n")
for u, s, b in classifications:
f.write(str(u)+","+str(s)+","+str(b)+"\n")
print datetime.datetime.time(datetime.datetime.now())
ibcc.runIbcc("/home/greg/Databases/condor_ibcc.py")
print datetime.datetime.time(datetime.datetime.now())
pickle.dump(subjects,open("/home/greg/Databases/condor_ibcc.pickle","wb")) | apache-2.0 |
equialgo/scikit-learn | sklearn/ensemble/voting_classifier.py | 19 | 9888 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..utils.validation import has_fit_parameter, check_is_fitted
def _parallel_fit_estimator(estimator, X, y, sample_weight):
"""Private function used to fit an estimator within a job."""
if sample_weight is not None:
estimator.fit(X, y, sample_weight)
else:
estimator.fit(X, y)
return estimator
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for ``fit``.
If -1, then the number of jobs is set to the number of cores.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array-like, shape = [n_predictions]
The classes labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None, n_jobs=1):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if (self.weights is not None and
len(self.weights) != len(self.estimators)):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
if sample_weight is not None:
for name, step in self.estimators:
if not has_fit_parameter(step, 'sample_weight'):
raise ValueError('Underlying estimator \'%s\' does not support'
' sample weights.' % name)
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
transformed_y = self.le_.transform(y)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y,
sample_weight)
for _, clf in self.estimators)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
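# --- Hedged illustration (not part of the original scikit-learn source) ---
# Reproduces the two combination rules from predict/_predict_proba above by
# hand for one sample, so the weighting logic is easy to follow. The
# probabilities and weights below are invented for illustration only.
def _demo_voting_rules():
    # rows: three classifiers; columns: two classes; one sample
    probas = np.array([[0.9, 0.1],
                       [0.4, 0.6],
                       [0.3, 0.7]])
    weights = [3, 1, 1]
    # soft voting: weighted average of the probabilities, then argmax
    soft_label = np.argmax(np.average(probas, axis=0, weights=weights))
    # hard voting: weighted count of each classifier's predicted label
    hard_labels = np.argmax(probas, axis=1)            # [0, 1, 1]
    hard_label = np.argmax(np.bincount(hard_labels, weights=weights))
    return soft_label, hard_label                      # both 0 here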
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
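    # the matrix is built with a tiny tail_strength, so its singular values
    # should sum to approximately the effective rank (5), as checked below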
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
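    # dictionary atoms (columns of D) are expected to be l2-normalized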
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
sofianehaddad/MVRM | test/test_3U_2_nondiag.py | 1 | 5625 | # -*- Python -*-
#
# @file test_3U_2_nondiag.py
# @brief MultivariateRandomMixture validation tests
#
# Copyright (C) 2013 EADS IW France
#
# Author(s) : Denis Barbier, IMACS
# Sofiane Haddad, IMACS
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests of MultivariateRandomMixture
===================================
Test 2: R^3-->R^2 case
Validation with respect to the Maple
use case
"""
if __name__ == "__main__":
import openturns as ot
import MultivariateRandomMixture as MV
import MaxNormMeshGrid
import numpy as np
import time
blockMin = 3
blockMax = 9
maxSize = 1 << blockMax
pdf_precision = 1e-8
"""
Test
------
"""
#ot.Log.Show(ot.Log.ALL)
collection = ot.DistributionCollection([ot.Uniform(0,1), ot.Uniform(0,1), ot.Uniform(0,1)])
matrix = ot.Matrix([[1, -2, 1], [1, 1, -3]])
distribution = MV.PythonMultivariateRandomMixture(collection, matrix)
distribution.setGridMesher(MaxNormMeshGrid.CachedMeshGrid(MaxNormMeshGrid.SkinCube2D(distribution.getReferenceBandwidth(), symmetric=True), size=maxSize))
interval = distribution.getRange()
mean = distribution.getMean()
cov = distribution.getCovariance()
sigma = distribution.getStandardDeviation()
print "range = ", interval
print "mean = ", mean
print "cov = ", cov
print "sigma = ", sigma
distribution.setBlockMin(blockMin)
distribution.setBlockMax(blockMax)
distribution.setPDFPrecision(pdf_precision)
# importing validation sample
validation_sample = ot.NumericalSample.ImportFromCSVFile("../validation/valid_d2_3unif.csv")
# sample for error observation
estimate_sample = ot.NumericalSample(len(validation_sample), 3)
delta = 0.0
dt = []
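    # delta accumulates the squared differences between the estimated pdf and
    # the reference values; it is reduced to a mean squared error after the loop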
for ind in xrange(len(validation_sample)):
x, y, pdf_theoretical = tuple(validation_sample[ind])
estimate_sample[ind, 0] = x
estimate_sample[ind, 1] = y
u = [x, y]
tic = time.time()
pdf_estimate = distribution.computePDF(u)
toc = time.time()
estimate_sample[ind, 2] = pdf_estimate
dt.append(toc - tic)
print "dt = %s"%(toc-tic)
delta += abs((pdf_estimate - pdf_theoretical))**2
print "x=%s, y=%s, pdf_estimate=%s pdf_theoretical=%s"%(x, y, pdf_estimate, pdf_theoretical)
if abs(pdf_theoretical) < 1e-16:
pdf_theoretical = 0.0
try :
error = abs(pdf_theoretical - pdf_estimate)/pdf_theoretical
print "relative_error=%s"%error
except ZeroDivisionError:
error = abs(pdf_theoretical - pdf_estimate)
print "absolute_error=%s"%error
print "pdf_error=%s" %distribution.getLastPDFError()
    # Mean squared deviation of the estimated pdf from the reference values
delta /= len(validation_sample)
print "delta of pdf=%s" %(np.sqrt(delta))
# Reorganization of data using meshgrid for graphical purposes
x = np.unique(np.array(validation_sample.getMarginal(0)))
y = np.unique(np.array(validation_sample.getMarginal(1)))
grid_x, grid_y = np.meshgrid(x,y)
shape = grid_x.shape
pdf_theoretical = np.ndarray(shape)
pdf_estimate = np.ndarray(shape)
ind = 0
for i in range(shape[0]):
for j in range(shape[1]):
# grid_x[j,i],grid_y[j,i] respectively equal to validation_sample[ind,0], validation_sample[ind,1]
pdf_theoretical[j,i] = validation_sample[ind, 2]
pdf_estimate[j,i] = estimate_sample[ind, 2]
ind +=1
try :
import matplotlib.pylab as plt
fig = plt.figure()
plt.contour(pdf_estimate, vmin=np.min(pdf_estimate), vmax=np.max(pdf_estimate), origin='lower', extent=[np.min(x), np.max(x), np.min(y), np.max(y)])
plt.colorbar()
plt.title("Estimated PDF with MVRM")
plt.savefig("3Uniform2d_pdf.pdf")
plt.close('all')
fig = plt.figure()
pdf_error = pdf_estimate - pdf_theoretical
plt.imshow(pdf_error, vmin=np.min(pdf_error), vmax=np.max(pdf_error), origin='lower', extent=[np.min(x), np.max(x), np.min(y), np.max(y)])
plt.colorbar()
plt.title("Error PDF with MVRM")
plt.savefig("3Uniform2d_error_pdf.pdf")
plt.close('all')
except ImportError:
        ot.Log.Warn("Matplotlib not found. Could not create iso values graph of pdf")
# computation on a grid using FFT
# grid of form mean + b sigma in each direction
b = 4.0
N = 1024
[y, pdf_grid] = distribution.computePDFOn2DGrid(b, N)
try :
import matplotlib.pylab as plt
fig = plt.figure()
plt.contour(pdf_grid, vmin=np.min(pdf_grid), vmax=np.max(pdf_grid), origin='lower', extent=[np.min(y[0]), np.max(y[0]), np.min(y[1]), np.max(y[1])])
plt.colorbar()
plt.title("Estimated PDF with MVRM using FFT")
plt.savefig("3Uniform2d_pdf_grid.pdf")
plt.close('all')
except ImportError:
pass
| lgpl-3.0 |
liberatorqjw/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al. 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4,
                   'max_depth': None, 'random_state': 2,
                   'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
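    # staged_decision_function yields the decision scores after each boosting
    # stage, so the test-set deviance can be traced across iterations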
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
procoder317/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
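# Minimal reference implementation of the passive-aggressive update rules,
# used below to cross-check the coefficients learned by
# PassiveAggressiveClassifier and PassiveAggressiveRegressor.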
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
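                # PA-I (hinge / epsilon-insensitive) clips the step size at C,
                # while PA-II (the squared losses) instead divides the loss by
                # ||x||^2 + 1/(2C).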
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
tawsifkhan/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert indexable data structures to
    # numpy arrays with float dtypes, and simply lets them pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
dongjoon-hyun/spark | python/pyspark/pandas/indexing.py | 9 | 68031 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A loc indexer for pandas-on-Spark DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections.abc import Iterable
from functools import reduce
from typing import Any, Optional, List, Tuple, TYPE_CHECKING, Union, cast, Sized
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import functions as F, Column
from pyspark.sql.types import BooleanType, LongType
from pyspark.sql.utils import AnalysisException
import numpy as np
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Label, Name, Scalar
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.exceptions import SparkPandasIndexingError, SparkPandasNotImplementedError
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
lazy_property,
name_like_string,
same_anchor,
scol_for,
spark_column_equals,
verify_temp_column_name,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class IndexerLike(object):
def __init__(self, psdf_or_psser: Union["Series", "DataFrame"]):
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series
assert isinstance(
psdf_or_psser, (DataFrame, Series)
), "unexpected argument type: {}".format(type(psdf_or_psser))
self._psdf_or_psser = psdf_or_psser
@property
def _is_df(self) -> bool:
from pyspark.pandas.frame import DataFrame
return isinstance(self._psdf_or_psser, DataFrame)
@property
def _is_series(self) -> bool:
from pyspark.pandas.series import Series
return isinstance(self._psdf_or_psser, Series)
@property
def _psdf(self) -> "DataFrame":
if self._is_df:
return cast("DataFrame", self._psdf_or_psser)
else:
assert self._is_series
return self._psdf_or_psser._psdf
@property
def _internal(self) -> InternalFrame:
return self._psdf._internal
class AtIndexer(IndexerLike):
"""
Access a single value for a row/column label pair.
If the index is not unique, all matching pairs are returned as an array.
Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to
get a single value in a DataFrame or Series.
.. note:: Unlike pandas, pandas-on-Spark only allows using ``at`` to get values but not to
set them.
.. note:: Warning: If ``row_index`` matches a lot of rows, large amounts of data will be
fetched, potentially causing your machine to run out of memory.
Raises
------
KeyError
When label does not exist in DataFrame
Examples
--------
>>> psdf = ps.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... index=[4, 5, 5], columns=['A', 'B', 'C'])
>>> psdf
A B C
4 0 2 3
5 0 4 1
5 10 20 30
Get value at specified row/column pair
>>> psdf.at[4, 'B']
2
Get array if an index occurs multiple times
>>> psdf.at[5, 'B']
array([ 4, 20])
"""
def __getitem__(self, key: Any) -> Union["Series", "DataFrame", Scalar]:
if self._is_df:
if not isinstance(key, tuple) or len(key) != 2:
raise TypeError("Use DataFrame.at like .at[row_index, column_name]")
row_sel, col_sel = key
else:
assert self._is_series, type(self._psdf_or_psser)
if isinstance(key, tuple) and len(key) != 1:
raise TypeError("Use Series.at like .at[row_index]")
row_sel = key
col_sel = self._psdf_or_psser._column_label
if self._internal.index_level == 1:
if not is_name_like_value(row_sel, allow_none=False, allow_tuple=False):
raise ValueError("At based indexing on a single index can only have a single value")
row_sel = (row_sel,)
else:
if not is_name_like_tuple(row_sel, allow_none=False):
raise ValueError("At based indexing on multi-index can only have tuple values")
if col_sel is not None:
if not is_name_like_value(col_sel, allow_none=False):
raise ValueError("At based indexing on multi-index can only have tuple values")
if not is_name_like_tuple(col_sel):
col_sel = (col_sel,)
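        # Build one equality condition per index level and AND them together to
        # filter the rows matching the requested label.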
cond = reduce(
lambda x, y: x & y,
[scol == row for scol, row in zip(self._internal.index_spark_columns, row_sel)],
)
pdf = (
self._internal.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
.filter(cond)
.select(self._internal.spark_column_for(col_sel))
.toPandas()
)
if len(pdf) < 1:
raise KeyError(name_like_string(row_sel))
values = cast(pd.DataFrame, pdf).iloc[:, 0].values
return (
values if (len(row_sel) < self._internal.index_level or len(values) > 1) else values[0]
)
class iAtIndexer(IndexerLike):
"""
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
KeyError
When label does not exist in DataFrame
Examples
--------
>>> df = ps.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
1
Get value within a series
>>> psser = ps.Series([1, 2, 3], index=[10, 20, 30])
>>> psser
10 1
20 2
30 3
dtype: int64
>>> psser.iat[1]
2
"""
def __getitem__(self, key: Any) -> Union["Series", "DataFrame", Scalar]:
if self._is_df:
if not isinstance(key, tuple) or len(key) != 2:
raise TypeError(
"Use DataFrame.iat like .iat[row_integer_position, column_integer_position]"
)
row_sel, col_sel = key
if not isinstance(row_sel, int) or not isinstance(col_sel, int):
raise ValueError("iAt based indexing can only have integer indexers")
return self._psdf_or_psser.iloc[row_sel, col_sel]
else:
assert self._is_series, type(self._psdf_or_psser)
if not isinstance(key, int) and len(key) != 1:
raise TypeError("Use Series.iat like .iat[row_integer_position]")
if not isinstance(key, int):
raise ValueError("iAt based indexing can only have integer indexers")
return self._psdf_or_psser.iloc[key]
class LocIndexerLike(IndexerLike, metaclass=ABCMeta):
def _select_rows(self, rows_sel: Any) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
"""
        Dispatch the row-selection logic to a more specific method according to the type of `rows_sel`.
Parameters
----------
rows_sel : the key specified to select rows.
Returns
-------
Tuple of Spark column, int, int:
* The Spark column for the condition to filter the rows.
* The number of rows when the selection can be simplified by limit.
* The remaining index rows if the result index size is shrunk.
"""
from pyspark.pandas.series import Series
if rows_sel is None:
return None, None, None
elif isinstance(rows_sel, Series):
return self._select_rows_by_series(rows_sel)
elif isinstance(rows_sel, Column):
return self._select_rows_by_spark_column(rows_sel)
elif isinstance(rows_sel, slice):
if rows_sel == slice(None):
# If slice is None - select everything, so nothing to do
return None, None, None
return self._select_rows_by_slice(rows_sel)
elif isinstance(rows_sel, tuple):
return self._select_rows_else(rows_sel)
elif is_list_like(rows_sel):
return self._select_rows_by_iterable(rows_sel)
else:
return self._select_rows_else(rows_sel)
def _select_cols(
self, cols_sel: Any, missing_keys: Optional[List[Name]] = None
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
"""
        Dispatch the column-selection logic to a more specific method according to the type of `cols_sel`.
Parameters
----------
cols_sel : the key specified to select columns.
Returns
-------
Tuple of list of column label, list of Spark columns, list of dtypes, bool:
* The column labels selected.
* The Spark columns selected.
* The field metadata selected.
* The boolean value whether Series should be returned or not.
* The Series name if needed.
"""
from pyspark.pandas.series import Series
if cols_sel is None:
column_labels = self._internal.column_labels
data_spark_columns = self._internal.data_spark_columns
data_fields = self._internal.data_fields
return column_labels, data_spark_columns, data_fields, False, None
elif isinstance(cols_sel, Series):
return self._select_cols_by_series(cols_sel, missing_keys)
elif isinstance(cols_sel, Column):
return self._select_cols_by_spark_column(cols_sel, missing_keys)
elif isinstance(cols_sel, slice):
if cols_sel == slice(None):
# If slice is None - select everything, so nothing to do
column_labels = self._internal.column_labels
data_spark_columns = self._internal.data_spark_columns
data_fields = self._internal.data_fields
return column_labels, data_spark_columns, data_fields, False, None
return self._select_cols_by_slice(cols_sel, missing_keys)
elif isinstance(cols_sel, tuple):
return self._select_cols_else(cols_sel, missing_keys)
elif is_list_like(cols_sel):
return self._select_cols_by_iterable(cols_sel, missing_keys)
else:
return self._select_cols_else(cols_sel, missing_keys)
# Methods for row selection
@abstractmethod
def _select_rows_by_series(
self, rows_sel: "Series"
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
"""Select rows by `Series` type key."""
pass
@abstractmethod
def _select_rows_by_spark_column(
self, rows_sel: Column
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
"""Select rows by Spark `Column` type key."""
pass
@abstractmethod
def _select_rows_by_slice(
self, rows_sel: slice
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
"""Select rows by `slice` type key."""
pass
@abstractmethod
def _select_rows_by_iterable(
self, rows_sel: Iterable
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
"""Select rows by `Iterable` type key."""
pass
@abstractmethod
def _select_rows_else(
self, rows_sel: Any
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
"""Select rows by other type key."""
pass
# Methods for col selection
@abstractmethod
def _select_cols_by_series(
self, cols_sel: "Series", missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
"""Select columns by `Series` type key."""
pass
@abstractmethod
def _select_cols_by_spark_column(
self, cols_sel: Column, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
"""Select columns by Spark `Column` type key."""
pass
@abstractmethod
def _select_cols_by_slice(
self, cols_sel: slice, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
"""Select columns by `slice` type key."""
pass
@abstractmethod
def _select_cols_by_iterable(
self, cols_sel: Iterable, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
"""Select columns by `Iterable` type key."""
pass
@abstractmethod
def _select_cols_else(
self, cols_sel: Any, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
"""Select columns by other type key."""
pass
def __getitem__(self, key: Any) -> Union["Series", "DataFrame"]:
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series, first_series
if self._is_series:
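            # A key Series anchored to a different DataFrame cannot be used as a
            # filter directly; attach it as a temporary column first so both
            # share the same anchor, then index through that column.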
if isinstance(key, Series) and not same_anchor(key, self._psdf_or_psser):
psdf = self._psdf_or_psser.to_frame()
temp_col = verify_temp_column_name(psdf, "__temp_col__")
psdf[temp_col] = key
return type(self)(psdf[self._psdf_or_psser.name])[psdf[temp_col]]
cond, limit, remaining_index = self._select_rows(key)
if cond is None and limit is None:
return self._psdf_or_psser
column_label = self._psdf_or_psser._column_label
column_labels = [column_label]
data_spark_columns = [self._internal.spark_column_for(column_label)]
data_fields = [self._internal.field_for(column_label)]
returns_series = True
series_name = self._psdf_or_psser.name
else:
assert self._is_df
if isinstance(key, tuple):
if len(key) != 2:
raise SparkPandasIndexingError("Only accepts pairs of candidates")
rows_sel, cols_sel = key
else:
rows_sel = key
cols_sel = None
if isinstance(rows_sel, Series) and not same_anchor(rows_sel, self._psdf_or_psser):
psdf = self._psdf_or_psser.copy()
temp_col = verify_temp_column_name(cast("DataFrame", psdf), "__temp_col__")
psdf[temp_col] = rows_sel
return type(self)(psdf)[psdf[temp_col], cols_sel][list(self._psdf_or_psser.columns)]
cond, limit, remaining_index = self._select_rows(rows_sel)
(
column_labels,
data_spark_columns,
data_fields,
returns_series,
series_name,
) = self._select_cols(cols_sel)
if cond is None and limit is None and returns_series:
psser = self._psdf_or_psser._psser_for(column_labels[0])
if series_name is not None and series_name != psser.name:
psser = psser.rename(series_name)
return psser
if remaining_index is not None:
index_spark_columns = self._internal.index_spark_columns[-remaining_index:]
index_names = self._internal.index_names[-remaining_index:]
index_fields = self._internal.index_fields[-remaining_index:]
else:
index_spark_columns = self._internal.index_spark_columns
index_names = self._internal.index_names
index_fields = self._internal.index_fields
if len(column_labels) > 0:
column_labels = column_labels.copy()
column_labels_level = max(
len(label) if label is not None else 1 for label in column_labels
)
none_column = 0
for i, label in enumerate(column_labels):
if label is None:
label = (none_column,)
none_column += 1
if len(label) < column_labels_level:
label = tuple(list(label) + ([""]) * (column_labels_level - len(label)))
column_labels[i] = label
if i == 0 and none_column == 1:
column_labels = [None]
column_label_names = self._internal.column_label_names[-column_labels_level:]
else:
column_label_names = self._internal.column_label_names
try:
sdf = self._internal.spark_frame
if cond is not None:
index_columns = sdf.select(index_spark_columns).columns
data_columns = sdf.select(data_spark_columns).columns
sdf = sdf.filter(cond).select(index_spark_columns + data_spark_columns)
index_spark_columns = [scol_for(sdf, col) for col in index_columns]
data_spark_columns = [scol_for(sdf, col) for col in data_columns]
if limit is not None:
if limit >= 0:
sdf = sdf.limit(limit)
else:
sdf = sdf.limit(sdf.count() + limit)
sdf = sdf.drop(NATURAL_ORDER_COLUMN_NAME)
except AnalysisException:
raise KeyError(
"[{}] don't exist in columns".format(
[col._jc.toString() for col in data_spark_columns] # type: ignore
)
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
data_fields=data_fields,
column_label_names=column_label_names,
)
psdf = DataFrame(internal)
if returns_series:
psdf_or_psser = first_series(psdf)
if series_name is not None and series_name != psdf_or_psser.name:
psdf_or_psser = psdf_or_psser.rename(series_name)
else:
psdf_or_psser = psdf
if remaining_index is not None and remaining_index == 0:
pdf_or_pser = psdf_or_psser.head(2).to_pandas()
length = len(pdf_or_pser)
if length == 0:
raise KeyError(name_like_string(key))
elif length == 1:
return pdf_or_pser.iloc[0]
else:
return psdf_or_psser
else:
return psdf_or_psser
def __setitem__(self, key: Any, value: Any) -> None:
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series, first_series
if self._is_series:
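            # As in __getitem__, a key or value Series that cannot be used in
            # place (different anchor, or any Series under iloc) is first
            # materialized as a temporary column so the update runs on one frame.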
if (
isinstance(key, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(key, self._psdf_or_psser))
) or (
isinstance(value, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(value, self._psdf_or_psser))
):
if self._psdf_or_psser.name is None:
psdf = self._psdf_or_psser.to_frame()
column_label = psdf._internal.column_labels[0]
else:
psdf = self._psdf_or_psser._psdf.copy()
column_label = self._psdf_or_psser._column_label
temp_natural_order = verify_temp_column_name(psdf, "__temp_natural_order__")
temp_key_col = verify_temp_column_name(psdf, "__temp_key_col__")
temp_value_col = verify_temp_column_name(psdf, "__temp_value_col__")
psdf[temp_natural_order] = F.monotonically_increasing_id()
if isinstance(key, Series):
psdf[temp_key_col] = key
if isinstance(value, Series):
psdf[temp_value_col] = value
psdf = psdf.sort_values(temp_natural_order).drop(temp_natural_order)
psser = psdf._psser_for(column_label)
if isinstance(key, Series):
key = F.col(
"`{}`".format(psdf[temp_key_col]._internal.data_spark_column_names[0])
)
if isinstance(value, Series):
value = F.col(
"`{}`".format(psdf[temp_value_col]._internal.data_spark_column_names[0])
)
type(self)(psser)[key] = value
if self._psdf_or_psser.name is None:
psser = psser.rename()
self._psdf_or_psser._psdf._update_internal_frame(
psser._psdf[
self._psdf_or_psser._psdf._internal.column_labels
]._internal.resolved_copy,
requires_same_anchor=False,
)
return
if isinstance(value, DataFrame):
raise ValueError("Incompatible indexer with DataFrame")
cond, limit, remaining_index = self._select_rows(key)
if cond is None:
cond = SF.lit(True)
if limit is not None:
cond = cond & (
self._internal.spark_frame[cast(iLocIndexer, self)._sequence_col]
< SF.lit(limit)
)
if isinstance(value, (Series, Column)):
if remaining_index is not None and remaining_index == 0:
raise ValueError(
"No axis named {} for object type {}".format(key, type(value).__name__)
)
if isinstance(value, Series):
value = value.spark.column
else:
value = SF.lit(value)
scol = (
F.when(cond, value)
.otherwise(self._internal.spark_column_for(self._psdf_or_psser._column_label))
.alias(name_like_string(self._psdf_or_psser.name or SPARK_DEFAULT_SERIES_NAME))
)
internal = self._internal.with_new_spark_column(
self._psdf_or_psser._column_label, scol # TODO: dtype?
)
self._psdf_or_psser._psdf._update_internal_frame(internal, requires_same_anchor=False)
else:
assert self._is_df
if isinstance(key, tuple):
if len(key) != 2:
raise SparkPandasIndexingError("Only accepts pairs of candidates")
rows_sel, cols_sel = key
else:
rows_sel = key
cols_sel = None
if isinstance(value, DataFrame):
if len(value.columns) == 1:
value = first_series(value)
else:
raise ValueError("Only a dataframe with one column can be assigned")
if (
isinstance(rows_sel, Series)
and (
isinstance(self, iLocIndexer) or not same_anchor(rows_sel, self._psdf_or_psser)
)
) or (
isinstance(value, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(value, self._psdf_or_psser))
):
psdf = cast(DataFrame, self._psdf_or_psser.copy())
temp_natural_order = verify_temp_column_name(psdf, "__temp_natural_order__")
temp_key_col = verify_temp_column_name(psdf, "__temp_key_col__")
temp_value_col = verify_temp_column_name(psdf, "__temp_value_col__")
psdf[temp_natural_order] = F.monotonically_increasing_id()
if isinstance(rows_sel, Series):
psdf[temp_key_col] = rows_sel
if isinstance(value, Series):
psdf[temp_value_col] = value
psdf = psdf.sort_values(temp_natural_order).drop(temp_natural_order)
if isinstance(rows_sel, Series):
rows_sel = F.col(
"`{}`".format(psdf[temp_key_col]._internal.data_spark_column_names[0])
)
if isinstance(value, Series):
value = F.col(
"`{}`".format(psdf[temp_value_col]._internal.data_spark_column_names[0])
)
type(self)(psdf)[rows_sel, cols_sel] = value
self._psdf_or_psser._update_internal_frame(
psdf[list(self._psdf_or_psser.columns)]._internal.resolved_copy,
requires_same_anchor=False,
)
return
cond, limit, remaining_index = self._select_rows(rows_sel)
missing_keys = [] # type: Optional[List[Name]]
_, data_spark_columns, _, _, _ = self._select_cols(cols_sel, missing_keys=missing_keys)
if cond is None:
cond = SF.lit(True)
if limit is not None:
cond = cond & (
self._internal.spark_frame[cast(iLocIndexer, self)._sequence_col]
< SF.lit(limit)
)
if isinstance(value, (Series, Column)):
if remaining_index is not None and remaining_index == 0:
raise ValueError("Incompatible indexer with Series")
if len(data_spark_columns) > 1:
raise ValueError("shape mismatch")
if isinstance(value, Series):
value = value.spark.column
else:
value = SF.lit(value)
new_data_spark_columns = []
new_fields = []
for new_scol, spark_column_name, new_field in zip(
self._internal.data_spark_columns,
self._internal.data_spark_column_names,
self._internal.data_fields,
):
for scol in data_spark_columns:
if spark_column_equals(new_scol, scol):
new_scol = F.when(cond, value).otherwise(scol).alias(spark_column_name)
new_field = InternalField.from_struct_field(
self._internal.spark_frame.select(new_scol).schema[0],
use_extension_dtypes=new_field.is_extension_dtype,
)
break
new_data_spark_columns.append(new_scol)
new_fields.append(new_field)
column_labels = self._internal.column_labels.copy()
for missing in missing_keys:
if is_name_like_tuple(missing):
label = cast(Label, missing)
else:
label = cast(Label, (missing,))
if len(label) < self._internal.column_labels_level:
label = tuple(
list(label) + ([""] * (self._internal.column_labels_level - len(label)))
)
elif len(label) > self._internal.column_labels_level:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(label), self._internal.column_labels_level
)
)
column_labels.append(label)
new_data_spark_columns.append(F.when(cond, value).alias(name_like_string(label)))
new_fields.append(None)
internal = self._internal.with_new_columns(
new_data_spark_columns, column_labels=column_labels, data_fields=new_fields
)
self._psdf_or_psser._update_internal_frame(internal, requires_same_anchor=False)
class LocIndexer(LocIndexerLike):
"""
Access a group of rows and columns by label(s) or a boolean Series.
``.loc[]`` is primarily label based, but may also be used with a
conditional boolean Series derived from the DataFrame or Series.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index) for column selection.
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'``.
- A conditional boolean Series derived from the DataFrame or Series
- A boolean array of the same length as the column axis being sliced,
e.g. ``[True, False, True]``.
- An alignable boolean pandas Series to the column axis being sliced.
The index of the key will be aligned before masking.
Not allowed inputs which pandas allows are:
- A boolean array of the same length as the row axis being sliced,
e.g. ``[True, False, True]``.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
.. note:: MultiIndex is not supported yet.
.. note:: Note that contrary to usual python slices, **both** the
start and the stop are included, and the step of the slice is not allowed.
.. note:: With a list or array of labels for row selection,
pandas-on-Spark behaves as a filter without reordering by the labels.
See Also
--------
Series.loc : Access group of values using labels.
Examples
--------
**Getting values**
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
Single label. Note this returns the row as a Series.
>>> df.loc['viper']
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
    Also note that pandas-on-Spark behaves just like a filter without reordering by the labels.
>>> df.loc[['viper', 'sidewinder']]
max_speed shield
viper 4 5
sidewinder 7 8
>>> df.loc[['sidewinder', 'viper']]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for column.
>>> df.loc['cobra', 'shield']
2
List of labels for row.
>>> df.loc[['cobra'], 'shield']
cobra 2
Name: shield, dtype: int64
List of labels for column.
>>> df.loc['cobra', ['shield']]
shield 2
Name: cobra, dtype: int64
List of labels for both row and column.
>>> df.loc[['cobra'], ['shield']]
shield
cobra 2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
>>> df.loc['cobra':'viper', 'max_speed']
cobra 1
viper 4
Name: max_speed, dtype: int64
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
>>> df.loc[df['shield'] > 6, ['max_speed']]
max_speed
sidewinder 7
A boolean array of the same length as the column axis being sliced.
>>> df.loc[:, [False, True]]
shield
cobra 2
viper 5
sidewinder 8
An alignable boolean Series to the column axis being sliced.
>>> df.loc[:, pd.Series([False, True], index=['max_speed', 'shield'])]
shield
cobra 2
viper 5
sidewinder 8
**Setting values**
Setting value for all items matching the list of labels.
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
>>> df
max_speed shield
cobra 1 2
viper 4 50
sidewinder 7 50
Setting value for an entire row
>>> df.loc['cobra'] = 10
>>> df
max_speed shield
cobra 10 10
viper 4 50
sidewinder 7 50
Set value for an entire column
>>> df.loc[:, 'max_speed'] = 30
>>> df
max_speed shield
cobra 30 10
viper 30 50
sidewinder 30 50
Set value for an entire list of columns
>>> df.loc[:, ['max_speed', 'shield']] = 100
>>> df
max_speed shield
cobra 100 100
viper 100 100
sidewinder 100 100
Set value with Series
>>> df.loc[:, 'shield'] = df['shield'] * 2
>>> df
max_speed shield
cobra 100 200
viper 100 200
sidewinder 100 200
**Getting values on a DataFrame with an index that has integer labels**
Another example using integers for the index
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=[7, 8, 9],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
7 1 2
8 4 5
9 7 8
Slice with integer labels for rows. As mentioned above, note that both
the start and stop of the slice are included.
>>> df.loc[7:9]
max_speed shield
7 1 2
8 4 5
9 7 8
"""
@staticmethod
def _NotImplemented(description: str) -> SparkPandasNotImplementedError:
return SparkPandasNotImplementedError(
description=description,
pandas_function=".loc[..., ...]",
spark_target_function="select, where",
)
def _select_rows_by_series(
self, rows_sel: "Series"
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
assert isinstance(rows_sel.spark.data_type, BooleanType), rows_sel.spark.data_type
return rows_sel.spark.column, None, None
def _select_rows_by_spark_column(
self, rows_sel: Column
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
spark_type = self._internal.spark_frame.select(rows_sel).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
return rows_sel, None, None
def _select_rows_by_slice(
self, rows_sel: slice
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
from pyspark.pandas.indexes import MultiIndex
if rows_sel.step is not None:
raise LocIndexer._NotImplemented("Cannot use step with Spark.")
elif self._internal.index_level == 1:
sdf = self._internal.spark_frame
index = self._psdf_or_psser.index
index_column = index.to_series()
index_data_type = index_column.spark.data_type
start = rows_sel.start
stop = rows_sel.stop
# get natural order from '__natural_order__' from start to stop
# to keep natural order.
start_and_stop = (
sdf.select(index_column.spark.column, NATURAL_ORDER_COLUMN_NAME)
.where(
(index_column.spark.column == SF.lit(start).cast(index_data_type))
| (index_column.spark.column == SF.lit(stop).cast(index_data_type))
)
.collect()
)
start = [row[1] for row in start_and_stop if row[0] == start]
start = start[0] if len(start) > 0 else None
stop = [row[1] for row in start_and_stop if row[0] == stop]
stop = stop[-1] if len(stop) > 0 else None
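# At this point `start`/`stop` hold the natural-order position of the first/last
# row whose index equals the requested label, or None when the label was not
# found (the monotonicity fallback below then decides whether to widen the
# slice or raise KeyError).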
conds = [] # type: List[Column]
if start is not None:
conds.append(F.col(NATURAL_ORDER_COLUMN_NAME) >= SF.lit(start).cast(LongType()))
if stop is not None:
conds.append(F.col(NATURAL_ORDER_COLUMN_NAME) <= SF.lit(stop).cast(LongType()))
# if index order is not monotonic increasing or decreasing
# and specified values don't exist in index, raise KeyError
if (start is None and rows_sel.start is not None) or (
stop is None and rows_sel.stop is not None
):
inc = index_column.is_monotonic_increasing
if inc is False:
dec = index_column.is_monotonic_decreasing
if start is None and rows_sel.start is not None:
start = rows_sel.start
if inc is not False:
conds.append(
index_column.spark.column >= SF.lit(start).cast(index_data_type)
)
elif dec is not False:
conds.append(
index_column.spark.column <= SF.lit(start).cast(index_data_type)
)
else:
raise KeyError(rows_sel.start)
if stop is None and rows_sel.stop is not None:
stop = rows_sel.stop
if inc is not False:
conds.append(
index_column.spark.column <= SF.lit(stop).cast(index_data_type)
)
elif dec is not False:
conds.append(
index_column.spark.column >= SF.lit(stop).cast(index_data_type)
)
else:
raise KeyError(rows_sel.stop)
return reduce(lambda x, y: x & y, conds), None, None
else:
index = self._psdf_or_psser.index
index_data_type = [f.dataType for f in index.to_series().spark.data_type]
start = rows_sel.start
if start is not None:
if not isinstance(start, tuple):
start = (start,)
if len(start) == 0:
start = None
stop = rows_sel.stop
if stop is not None:
if not isinstance(stop, tuple):
stop = (stop,)
if len(stop) == 0:
stop = None
depth = max(
len(start) if start is not None else 0, len(stop) if stop is not None else 0
)
if depth == 0:
return None, None, None
elif (
depth > self._internal.index_level
or not index.droplevel(list(range(self._internal.index_level)[depth:])).is_monotonic
):
raise KeyError(
"Key length ({}) was greater than MultiIndex sort depth".format(depth)
)
conds = []
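# Build a lexicographic comparison over the index levels: walking the levels
# from last to first, each F.when() keeps the inner condition when the level
# equals the bound and otherwise compares the level itself, so the final
# condition means "index tuple >= start" (resp. "<= stop") in sorted order.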
if start is not None:
cond = SF.lit(True)
for scol, value, dt in list(
zip(self._internal.index_spark_columns, start, index_data_type)
)[::-1]:
compare = MultiIndex._comparator_for_monotonic_increasing(dt)
cond = F.when(scol.eqNullSafe(SF.lit(value).cast(dt)), cond).otherwise(
compare(scol, SF.lit(value).cast(dt), Column.__gt__)
)
conds.append(cond)
if stop is not None:
cond = SF.lit(True)
for scol, value, dt in list(
zip(self._internal.index_spark_columns, stop, index_data_type)
)[::-1]:
compare = MultiIndex._comparator_for_monotonic_increasing(dt)
cond = F.when(scol.eqNullSafe(SF.lit(value).cast(dt)), cond).otherwise(
compare(scol, SF.lit(value).cast(dt), Column.__lt__)
)
conds.append(cond)
return reduce(lambda x, y: x & y, conds), None, None
def _select_rows_by_iterable(
self, rows_sel: Iterable
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
rows_sel = list(rows_sel)
if len(rows_sel) == 0:
return SF.lit(False), None, None
elif self._internal.index_level == 1:
index_column = self._psdf_or_psser.index.to_series()
index_data_type = index_column.spark.data_type
if len(rows_sel) == 1:
return (
index_column.spark.column == SF.lit(rows_sel[0]).cast(index_data_type),
None,
None,
)
else:
return (
index_column.spark.column.isin(
[SF.lit(r).cast(index_data_type) for r in rows_sel]
),
None,
None,
)
else:
raise LocIndexer._NotImplemented("Cannot select with MultiIndex with Spark.")
def _select_rows_else(
self, rows_sel: Any
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
if not isinstance(rows_sel, tuple):
rows_sel = (rows_sel,)
if len(rows_sel) > self._internal.index_level:
raise SparkPandasIndexingError("Too many indexers")
rows = [scol == value for scol, value in zip(self._internal.index_spark_columns, rows_sel)]
return (
reduce(lambda x, y: x & y, rows),
None,
self._internal.index_level - len(rows_sel),
)
def _get_from_multiindex_column(
self,
key: Optional[Label],
missing_keys: Optional[List[Name]],
labels: Optional[List[Tuple[Label, Label]]] = None,
recursed: int = 0,
) -> Tuple[List[Label], Optional[List[Column]], List[InternalField], bool, Optional[Name]]:
"""Select columns from multi-index columns."""
assert isinstance(key, tuple)
if labels is None:
labels = [(label, label) for label in self._internal.column_labels]
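# Each pair holds (full original label, still-unmatched suffix); every element
# of `key` below consumes one leading level of that suffix.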
for k in key:
labels = [
(label, None if lbl is None else lbl[1:])
for label, lbl in labels
if (lbl is None and k is None) or (lbl is not None and lbl[0] == k)
]
if len(labels) == 0:
if missing_keys is None:
raise KeyError(k)
else:
missing_keys.append(key)
return [], [], [], False, None
if all(lbl is not None and len(lbl) > 0 and lbl[0] == "" for _, lbl in labels):
# If the head is '', drill down recursively.
labels = [(label, tuple([str(key), *lbl[1:]])) for i, (label, lbl) in enumerate(labels)]
return self._get_from_multiindex_column((str(key),), missing_keys, labels, recursed + 1)
else:
returns_series = all(lbl is None or len(lbl) == 0 for _, lbl in labels)
if returns_series:
label_set = set(label for label, _ in labels)
assert len(label_set) == 1
label = list(label_set)[0]
column_labels = [label]
data_spark_columns = [self._internal.spark_column_for(label)]
data_fields = [self._internal.field_for(label)]
if label is None:
series_name = None # type: Name
else:
if recursed > 0:
label = label[:-recursed]
series_name = label if len(label) > 1 else label[0]
else:
column_labels = [
None if lbl is None or lbl == (None,) else lbl for _, lbl in labels
]
data_spark_columns = [self._internal.spark_column_for(label) for label, _ in labels]
data_fields = [self._internal.field_for(label) for label, _ in labels]
series_name = None
return column_labels, data_spark_columns, data_fields, returns_series, series_name
def _select_cols_by_series(
self, cols_sel: "Series", missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
column_labels = cols_sel._internal.column_labels
data_spark_columns = cols_sel._internal.data_spark_columns
data_fields = cols_sel._internal.data_fields
return column_labels, data_spark_columns, data_fields, True, None
def _select_cols_by_spark_column(
self, cols_sel: Column, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
column_labels = [
(self._internal.spark_frame.select(cols_sel).columns[0],)
] # type: List[Label]
data_spark_columns = [cols_sel]
return column_labels, data_spark_columns, None, True, None
def _select_cols_by_slice(
self, cols_sel: slice, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
start, stop = self._psdf_or_psser.columns.slice_locs(
start=cols_sel.start, end=cols_sel.stop
)
column_labels = self._internal.column_labels[start:stop]
data_spark_columns = self._internal.data_spark_columns[start:stop]
data_fields = self._internal.data_fields[start:stop]
return column_labels, data_spark_columns, data_fields, False, None
def _select_cols_by_iterable(
self, cols_sel: Iterable, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
from pyspark.pandas.series import Series
if all(isinstance(key, Series) for key in cols_sel):
column_labels = [key._column_label for key in cols_sel]
data_spark_columns = [key.spark.column for key in cols_sel]
data_fields = [key._internal.data_fields[0] for key in cols_sel]
elif all(isinstance(key, Column) for key in cols_sel):
column_labels = [
(self._internal.spark_frame.select(col).columns[0],) for col in cols_sel
]
data_spark_columns = list(cols_sel)
data_fields = None
elif all(isinstance(key, bool) for key in cols_sel) or all(
isinstance(key, np.bool_) for key in cols_sel
):
if len(cast(Sized, cols_sel)) != len(self._internal.column_labels):
raise IndexError(
"Boolean index has wrong length: %s instead of %s"
% (len(cast(Sized, cols_sel)), len(self._internal.column_labels))
)
if isinstance(cols_sel, pd.Series):
if not cols_sel.index.sort_values().equals(self._psdf.columns.sort_values()):
raise SparkPandasIndexingError(
"Unalignable boolean Series provided as indexer "
"(index of the boolean Series and of the indexed object do not match)"
)
else:
column_labels = [
column_label
for column_label in self._internal.column_labels
if cols_sel[column_label if len(column_label) > 1 else column_label[0]]
]
data_spark_columns = [
self._internal.spark_column_for(column_label)
for column_label in column_labels
]
data_fields = [
self._internal.field_for(column_label) for column_label in column_labels
]
else:
column_labels = [
self._internal.column_labels[i] for i, col in enumerate(cols_sel) if col
]
data_spark_columns = [
self._internal.data_spark_columns[i] for i, col in enumerate(cols_sel) if col
]
data_fields = [
self._internal.data_fields[i] for i, col in enumerate(cols_sel) if col
]
elif any(isinstance(key, tuple) for key in cols_sel) and any(
not is_name_like_tuple(key) for key in cols_sel
):
raise TypeError(
"Expected tuple, got {}".format(
type(set(key for key in cols_sel if not is_name_like_tuple(key)).pop())
)
)
else:
if missing_keys is None and all(isinstance(key, tuple) for key in cols_sel):
level = self._internal.column_labels_level
if any(len(key) != level for key in cols_sel):
raise ValueError("All the key level should be the same as column index level.")
column_labels = []
data_spark_columns = []
data_fields = []
for key in cols_sel:
found = False
for label in self._internal.column_labels:
if label == key or label[0] == key:
column_labels.append(label)
data_spark_columns.append(self._internal.spark_column_for(label))
data_fields.append(self._internal.field_for(label))
found = True
if not found:
if missing_keys is None:
raise KeyError("['{}'] not in index".format(name_like_string(key)))
else:
missing_keys.append(key)
return column_labels, data_spark_columns, data_fields, False, None
def _select_cols_else(
self, cols_sel: Any, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
if not is_name_like_tuple(cols_sel):
cols_sel = (cols_sel,)
return self._get_from_multiindex_column(cols_sel, missing_keys)
class iLocIndexer(LocIndexerLike):
"""
Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a conditional boolean Series.
Allowed inputs are:
- An integer for column selection, e.g. ``5``.
- A list or array of integers for row selection with distinct index values,
e.g. ``[3, 4, 0]``
- A list or array of integers for column selection, e.g. ``[4, 3, 0]``.
- A boolean array for column selection.
- A slice object with ints for row and column selection, e.g. ``1:7``.
Not allowed inputs which pandas allows are:
- A list or array of integers for row selection with duplicated indexes,
e.g. ``[4, 4, 0]``.
- A boolean array for row selection.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See Also
--------
DataFrame.loc : Purely label-location based indexer for selection by label.
Series.iloc : Purely integer-location based indexing for
selection by position.
Examples
--------
>>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
... {'a': 100, 'b': 200, 'c': 300, 'd': 400},
... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
>>> df = ps.DataFrame(mydict, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing just the rows**
A scalar integer for row selection.
>>> df.iloc[1]
a 100
b 200
c 300
d 400
Name: 1, dtype: int64
>>> df.iloc[[0]]
a b c d
0 1 2 3 4
With a `slice` object.
>>> df.iloc[:3]
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing both axes**
You can mix the indexer types for the index and columns. Use ``:`` to
select the entire axis.
With scalar integers.
>>> df.iloc[:1, 1]
0 2
Name: b, dtype: int64
With lists of integers.
>>> df.iloc[:2, [1, 3]]
b d
0 2 4
1 200 400
With `slice` objects.
>>> df.iloc[:2, 0:3]
a b c
0 1 2 3
1 100 200 300
With a boolean array whose length matches the columns.
>>> df.iloc[:, [True, False, True, False]]
a c
0 1 3
1 100 300
2 1000 3000
**Setting values**
Setting value for all items matching the list of labels.
>>> df.iloc[[1, 2], [1]] = 50
>>> df
a b c d
0 1 2 3 4
1 100 50 300 400
2 1000 50 3000 4000
Setting value for an entire row
>>> df.iloc[0] = 10
>>> df
a b c d
0 10 10 10 10
1 100 50 300 400
2 1000 50 3000 4000
Set value for an entire column
>>> df.iloc[:, 2] = 30
>>> df
a b c d
0 10 10 30 10
1 100 50 30 400
2 1000 50 30 4000
Set value for an entire list of columns
>>> df.iloc[:, [2, 3]] = 100
>>> df
a b c d
0 10 10 100 100
1 100 50 100 100
2 1000 50 100 100
Set value with Series
>>> df.iloc[:, 3] = df.iloc[:, 3] * 2
>>> df
a b c d
0 10 10 100 200
1 100 50 100 200
2 1000 50 100 200
"""
@staticmethod
def _NotImplemented(description: str) -> SparkPandasNotImplementedError:
return SparkPandasNotImplementedError(
description=description,
pandas_function=".iloc[..., ...]",
spark_target_function="select, where",
)
@lazy_property
def _internal(self) -> "InternalFrame":
# Use resolved_copy to fix the natural order.
internal = super()._internal.resolved_copy
sdf, force_nullable = InternalFrame.attach_distributed_sequence_column(
internal.spark_frame, column_name=self._sequence_col
)
return internal.with_new_sdf(
spark_frame=sdf.orderBy(NATURAL_ORDER_COLUMN_NAME),
index_fields=(
[field.copy(nullable=True) for field in internal.index_fields]
if force_nullable
else internal.index_fields
),
data_fields=(
[field.copy(nullable=True) for field in internal.data_fields]
if force_nullable
else internal.data_fields
),
)
@lazy_property
def _sequence_col(self) -> str:
# Use resolved_copy to fix the natural order.
internal = super()._internal.resolved_copy
return verify_temp_column_name(internal.spark_frame, "__distributed_sequence_column__")
def _select_rows_by_series(
self, rows_sel: "Series"
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
raise iLocIndexer._NotImplemented(
".iloc requires numeric slice, conditional "
"boolean Index or a sequence of positions as int, "
"got {}".format(type(rows_sel))
)
def _select_rows_by_spark_column(
self, rows_sel: Column
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
raise iLocIndexer._NotImplemented(
".iloc requires numeric slice, conditional "
"boolean Index or a sequence of positions as int, "
"got {}".format(type(rows_sel))
)
def _select_rows_by_slice(
self, rows_sel: slice
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
def verify_type(i: int) -> None:
if not isinstance(i, int):
raise TypeError(
"cannot do slice indexing with these indexers [{}] of {}".format(i, type(i))
)
has_negative = False
start = rows_sel.start
if start is not None:
verify_type(start)
if start == 0:
start = None
elif start < 0:
has_negative = True
stop = rows_sel.stop
if stop is not None:
verify_type(stop)
if stop < 0:
has_negative = True
step = rows_sel.step
if step is not None:
verify_type(step)
if step == 0:
raise ValueError("slice step cannot be zero")
else:
step = 1
if start is None and step == 1:
return None, stop, None
sdf = self._internal.spark_frame
sequence_scol = sdf[self._sequence_col]
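# Negative bounds (and descending slices without an explicit start) are only
# meaningful relative to the total row count, so a count() is triggered to
# translate them into absolute positions on the attached sequence column;
# a non-unit step is then enforced with a modulo test on the offset from start.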
if has_negative or (step < 0 and start is None):
cnt = sdf.count()
cond = []
if start is not None:
if start < 0:
start = start + cnt
if step >= 0:
cond.append(sequence_scol >= SF.lit(start).cast(LongType()))
else:
cond.append(sequence_scol <= SF.lit(start).cast(LongType()))
if stop is not None:
if stop < 0:
stop = stop + cnt
if step >= 0:
cond.append(sequence_scol < SF.lit(stop).cast(LongType()))
else:
cond.append(sequence_scol > SF.lit(stop).cast(LongType()))
if step != 1:
if step > 0:
start = start or 0
else:
start = start or (cnt - 1)
cond.append(((sequence_scol - start) % SF.lit(step).cast(LongType())) == SF.lit(0))
return reduce(lambda x, y: x & y, cond), None, None
def _select_rows_by_iterable(
self, rows_sel: Iterable
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
sdf = self._internal.spark_frame
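# A (potentially expensive) count() is only needed when a negative position
# has to be translated into an absolute one.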
if any(isinstance(key, (int, np.int, np.int64, np.int32)) and key < 0 for key in rows_sel):
offset = sdf.count()
else:
offset = 0
new_rows_sel = []
for key in list(rows_sel):
if not isinstance(key, (int, np.int, np.int64, np.int32)):
raise TypeError(
"cannot do positional indexing with these indexers [{}] of {}".format(
key, type(key)
)
)
if key < 0:
key = key + offset
new_rows_sel.append(key)
if len(new_rows_sel) != len(set(new_rows_sel)):
raise NotImplementedError(
"Duplicated row selection is not currently supported; "
"however, normalised index was [%s]" % new_rows_sel
)
sequence_scol = sdf[self._sequence_col]
cond = []
for key in new_rows_sel:
cond.append(sequence_scol == SF.lit(int(key)).cast(LongType()))
if len(cond) == 0:
cond = [SF.lit(False)]
return reduce(lambda x, y: x | y, cond), None, None
def _select_rows_else(
self, rows_sel: Any
) -> Tuple[Optional[Column], Optional[int], Optional[int]]:
if isinstance(rows_sel, int):
sdf = self._internal.spark_frame
return (sdf[self._sequence_col] == rows_sel), None, 0
elif isinstance(rows_sel, tuple):
raise SparkPandasIndexingError("Too many indexers")
else:
raise iLocIndexer._NotImplemented(
".iloc requires numeric slice, conditional "
"boolean Index or a sequence of positions as int, "
"got {}".format(type(rows_sel))
)
def _select_cols_by_series(
self, cols_sel: "Series", missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
raise ValueError(
"Location based indexing can only have [integer, integer slice, "
"listlike of integers, boolean array] types, got {}".format(cols_sel)
)
def _select_cols_by_spark_column(
self, cols_sel: Column, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
raise ValueError(
"Location based indexing can only have [integer, integer slice, "
"listlike of integers, boolean array] types, got {}".format(cols_sel)
)
def _select_cols_by_slice(
self, cols_sel: slice, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
if all(
s is None or isinstance(s, int) for s in (cols_sel.start, cols_sel.stop, cols_sel.step)
):
column_labels = self._internal.column_labels[cols_sel]
data_spark_columns = self._internal.data_spark_columns[cols_sel]
data_fields = self._internal.data_fields[cols_sel]
return column_labels, data_spark_columns, data_fields, False, None
else:
not_none = (
cols_sel.start
if cols_sel.start is not None
else cols_sel.stop
if cols_sel.stop is not None
else cols_sel.step
)
raise TypeError(
"cannot do slice indexing with these indexers {} of {}".format(
not_none, type(not_none)
)
)
def _select_cols_by_iterable(
self, cols_sel: Iterable, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
if all(isinstance(s, bool) for s in cols_sel):
cols_sel = [i for i, s in enumerate(cols_sel) if s]
if all(isinstance(s, int) for s in cols_sel):
column_labels = [self._internal.column_labels[s] for s in cols_sel]
data_spark_columns = [self._internal.data_spark_columns[s] for s in cols_sel]
data_fields = [self._internal.data_fields[s] for s in cols_sel]
return column_labels, data_spark_columns, data_fields, False, None
else:
raise TypeError("cannot perform reduce with flexible type")
def _select_cols_else(
self, cols_sel: Any, missing_keys: Optional[List[Name]]
) -> Tuple[
List[Label],
Optional[List[Column]],
Optional[List[InternalField]],
bool,
Optional[Name],
]:
if isinstance(cols_sel, int):
if cols_sel > len(self._internal.column_labels):
raise KeyError(cols_sel)
column_labels = [self._internal.column_labels[cols_sel]]
data_spark_columns = [self._internal.data_spark_columns[cols_sel]]
data_fields = [self._internal.data_fields[cols_sel]]
return column_labels, data_spark_columns, data_fields, True, None
else:
raise ValueError(
"Location based indexing can only have [integer, integer slice, "
"listlike of integers, boolean array] types, got {}".format(cols_sel)
)
def __setitem__(self, key: Any, value: Any) -> None:
if is_list_like(value) and not isinstance(value, Column):
iloc_item = self[key]
if not is_list_like(key) or not is_list_like(iloc_item):
raise ValueError("setting an array element with a sequence.")
else:
shape_iloc_item = iloc_item.shape
len_iloc_item = shape_iloc_item[0]
len_value = len(value)
if len_iloc_item != len_value:
if self._is_series:
raise ValueError(
"cannot set using a list-like indexer with a different length than "
"the value"
)
else:
raise ValueError(
"shape mismatch: value array of shape ({},) could not be broadcast "
"to indexing result of shape {}".format(len_value, shape_iloc_item)
)
super().__setitem__(key, value)
# Update again with resolved_copy to drop extra columns.
self._psdf._update_internal_frame(
self._psdf._internal.resolved_copy, requires_same_anchor=False
)
# Clean up implicitly cached properties to be able to reuse the indexer.
del self._internal
del self._sequence_col
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.indexing
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.indexing.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.indexing tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.indexing,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
tawsifkhan/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
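# Illustrative sketch (added here, not part of the original example): the greedy
# selection/refit loop that orthogonal matching pursuit performs, assuming the
# dictionary atoms (columns of X) have unit norm, as make_sparse_coded_signal
# produces above. The helper name `omp_sketch` is hypothetical and is not used
# by the plots below; e.g. `omp_sketch(X, y_noisy, n_nonzero_coefs)` would
# return a dense coefficient vector comparable to `omp.coef_`.
def omp_sketch(D, target, k):
    residual = target.copy()
    support = []
    coef_on_support = np.zeros(0)
    for _ in range(k):
        # pick the atom most correlated with the current residual
        correlations = np.abs(D.T.dot(residual))
        correlations[support] = 0.0  # never reselect an already chosen atom
        support.append(int(np.argmax(correlations)))
        # refit on the enlarged support by least squares, then update the residual
        coef_on_support = np.linalg.pinv(D[:, support]).dot(target)
        residual = target - D[:, support].dot(coef_on_support)
    coef = np.zeros(D.shape[1])
    coef[support] = coef_on_support
    return coef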
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
deroneriksson/systemml | projects/breast_cancer/preprocess.py | 15 | 4370 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Preprocess -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This script runs the preprocessing phase of the breast cancer project.
"""
import os
import shutil
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from pyspark.sql import SparkSession
from breastcancer.preprocessing import add_row_indices, get_labels_df, preprocess, save, sample
# Create new SparkSession
spark = (SparkSession.builder
.appName("Breast Cancer -- Preprocessing")
.getOrCreate())
# Ship a fresh copy of the `breastcancer` package to the Spark workers.
# Note: The zip must include the `breastcancer` directory itself,
# as well as all files within it for `addPyFile` to work correctly.
# This is equivalent to `zip -r breastcancer.zip breastcancer`.
dirname = "breastcancer"
zipname = dirname + ".zip"
shutil.make_archive(dirname, 'zip', dirname + "/..", dirname)
spark.sparkContext.addPyFile(zipname)
# Execute Preprocessing & Save
# TODO: Filtering tiles and then cutting into samples could result
# in samples with less tissue than desired, despite that being the
# procedure of the paper. Look into simply selecting tiles of the
# desired size to begin with.
# Settings
# TODO: Convert this to a set of parsed command line arguments
tile_size = 256
sample_size = 256
grayscale = False
num_partitions = 20000
training = True
row_indices = False
train_frac = 0.8
sample_frac = 0.01
seed = 42
folder = "data" # Linux-filesystem directory to read raw WSI data
save_folder = "data" # Hadoop-supported directory in which to save DataFrames
train_df_path = os.path.join(save_folder, "train_{}{}.parquet".format(sample_size,
"_grayscale" if grayscale else ""))
val_df_path = os.path.join(save_folder, "val_{}{}.parquet".format(sample_size,
"_grayscale" if grayscale else ""))
train_sample_path = os.path.join(save_folder, "train_{}_sample_{}{}.parquet".format(sample_frac,
sample_size, "_grayscale" if grayscale else ""))
val_sample_path = os.path.join(save_folder, "val_{}_sample_{}{}.parquet".format(sample_frac,
sample_size, "_grayscale" if grayscale else ""))
# Get labels
labels_df = get_labels_df(folder)
# Split into train and validation sets based on slide number, stratified by class
train, val = train_test_split(labels_df, train_size=train_frac, stratify=labels_df['tumor_score'],
random_state=seed)
# Process train & val slides
train_df = preprocess(spark, train.index, tile_size=tile_size, sample_size=sample_size,
grayscale=grayscale, num_partitions=num_partitions, folder=folder)
val_df = preprocess(spark, val.index, tile_size=tile_size, sample_size=sample_size,
grayscale=grayscale, num_partitions=num_partitions, folder=folder)
if row_indices:
# Add row indices
train_df = add_row_indices(train_df)
val_df = add_row_indices(val_df)
# Save train & val DataFrames
save(train_df, train_df_path, sample_size, grayscale)
save(val_df, val_df_path, sample_size, grayscale)
if sample_frac > 0:
# Sample Data
train_df = spark.read.load(train_df_path)
val_df = spark.read.load(val_df_path)
train_sample = sample(train_df, sample_frac, seed)
val_sample = sample(val_df, sample_frac, seed)
# Save sampled DataFrames.
save(train_sample, train_sample_path, sample_size, grayscale)
save(val_sample, val_sample_path, sample_size, grayscale)
| apache-2.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| mit |
joernhees/scikit-learn | sklearn/kernel_ridge.py | 48 | 6731 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
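# Closed-form dual solution of kernel ridge: dual_coef_ solves
# (K + alpha * I) dual_coef = y (column-wise for multiple targets) via a
# Cholesky-based solver on the kernel matrix.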
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/animation.py | 2 | 69777 | # TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import numpy as np
import os
import platform
import sys
import itertools
try:
# python3
from base64 import encodebytes
except ImportError:
# python2
from base64 import encodestring as encodebytes
import abc
import contextlib
import tempfile
import uuid
import warnings
from matplotlib._animation_data import (DISPLAY_TEMPLATE, INCLUDED_FRAMES,
JS_INCLUDE)
from matplotlib.cbook import iterable, deprecated
from matplotlib.compat import subprocess
from matplotlib import verbose
from matplotlib import rcParams, rcParamsDefault, rc_context
if sys.version_info < (3, 0):
from cStringIO import StringIO as InMemory
else:
from io import BytesIO as InMemory
# Process creation flag for subprocess to prevent it raising a terminal
# window. See for example:
# https://stackoverflow.com/questions/24130623/using-python-subprocess-popen-cant-prevent-exe-stopped-working-prompt
if platform.system() == 'Windows':
subprocess_creation_flags = CREATE_NO_WINDOW = 0x08000000
else:
# Apparently None won't work here
subprocess_creation_flags = 0
# Other potential writing methods:
# * http://pymedia.org/
# * libmng (produces swf) python wrappers: https://github.com/libming/libming
# * Wrap x264 API:
# (http://stackoverflow.com/questions/2940671/
# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
def adjusted_figsize(w, h, dpi, n):
'''Compute figure size so that pixels are a multiple of n
Parameters
----------
w, h : float
Size in inches
dpi : float
The dpi
n : int
The target multiple
Returns
-------
wnew, hnew : float
The new figure size in inches.
'''
# this maybe simplified if / when we adopt consistent rounding for
# pixel size across the whole library
def correct_roundoff(x, dpi, n):
if int(x*dpi) % n != 0:
if int(np.nextafter(x, np.inf)*dpi) % n == 0:
x = np.nextafter(x, np.inf)
elif int(np.nextafter(x, -np.inf)*dpi) % n == 0:
x = np.nextafter(x, -np.inf)
return x
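# Snap each dimension down so that the pixel count int(x * dpi) is a multiple
# of n, then let correct_roundoff nudge the result by one ulp when floating
# point rounding lands just off that multiple.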
wnew = int(w * dpi / n) * n / dpi
hnew = int(h * dpi / n) * n / dpi
return (correct_roundoff(wnew, dpi, n), correct_roundoff(hnew, dpi, n))
# A registry for available MovieWriter classes
class MovieWriterRegistry(object):
'''Registry of available writer classes by human readable name.'''
def __init__(self):
self.avail = dict()
self._registered = dict()
self._dirty = False
def set_dirty(self):
"""Sets a flag to re-setup the writers."""
self._dirty = True
def register(self, name):
"""Decorator for registering a class under a name.
Example use::
@registry.register(name)
class Foo:
pass
"""
def wrapper(writerClass):
self._registered[name] = writerClass
if writerClass.isAvailable():
self.avail[name] = writerClass
return writerClass
return wrapper
def ensure_not_dirty(self):
"""If dirty, reasks the writers if they are available"""
if self._dirty:
self.reset_available_writers()
def reset_available_writers(self):
"""Reset the available state of all registered writers"""
self.avail = {}
for name, writerClass in self._registered.items():
if writerClass.isAvailable():
self.avail[name] = writerClass
self._dirty = False
def list(self):
'''Get a list of available MovieWriters.'''
self.ensure_not_dirty()
return list(self.avail)
def is_available(self, name):
'''Check if given writer is available by name.
Parameters
----------
name : str
Returns
-------
available : bool
'''
self.ensure_not_dirty()
return name in self.avail
def __getitem__(self, name):
self.ensure_not_dirty()
if not self.avail:
raise RuntimeError("No MovieWriters available!")
return self.avail[name]
writers = MovieWriterRegistry()
class AbstractMovieWriter(six.with_metaclass(abc.ABCMeta)):
'''
Abstract base class for writing movies. Fundamentally, what a MovieWriter
does is provide a way to grab frames by calling grab_frame().
setup() is called to start the process and finish() is called afterwards.
This class is set up to provide for writing movie frame data to a pipe.
saving() is provided as a context manager to facilitate this process as::
with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100):
# Iterate over frames
moviewriter.grab_frame(**savefig_kwargs)
The use of the context manager ensures that setup() and finish() are
performed as necessary.
An instance of a concrete subclass of this class can be given as the
``writer`` argument of `Animation.save()`.
'''
@abc.abstractmethod
def setup(self, fig, outfile, dpi=None):
'''
Perform setup for writing the movie file.
Parameters
----------
fig: `matplotlib.figure.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int, optional
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file. Default is ``fig.dpi``.
'''
@abc.abstractmethod
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the `savefig`
command that saves the figure.
'''
@abc.abstractmethod
def finish(self):
'''Finish any processing for writing the movie.'''
@contextlib.contextmanager
def saving(self, fig, outfile, dpi, *args, **kwargs):
'''
Context manager to facilitate writing the movie file.
``*args, **kw`` are any parameters that should be passed to `setup`.
'''
# This particular sequence is what contextlib.contextmanager wants
self.setup(fig, outfile, dpi, *args, **kwargs)
try:
yield self
finally:
self.finish()
class MovieWriter(AbstractMovieWriter):
'''Base class for writing movies.
This class is set up to provide for writing movie frame data to a pipe.
See examples for how to use these classes.
Attributes
----------
frame_format : str
The format used in writing frame data, defaults to 'rgba'
fig : `~matplotlib.figure.Figure`
The figure to capture data from.
This must be provided by the sub-classes.
'''
def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
metadata=None):
'''MovieWriter
Parameters
----------
fps: int
Framerate for movie.
codec: string or None, optional
The codec to use. If ``None`` (the default) the ``animation.codec``
rcParam is used.
bitrate: int or None, optional
The bitrate for the saved movie file, which is one way to control
the output file size and quality. The default value is ``None``,
which uses the ``animation.bitrate`` rcParam. A value of -1
implies that the bitrate should be determined automatically by the
underlying utility.
extra_args: list of strings or None, optional
A list of extra string arguments to be passed to the underlying
movie utility. The default is ``None``, which passes the additional
arguments in the ``animation.extra_args`` rcParam.
metadata: Dict[str, str] or None
A dictionary of keys and values for metadata to include in the
output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
'''
self.fps = fps
self.frame_format = 'rgba'
if codec is None:
self.codec = rcParams['animation.codec']
else:
self.codec = codec
if bitrate is None:
self.bitrate = rcParams['animation.bitrate']
else:
self.bitrate = bitrate
if extra_args is None:
self.extra_args = list(rcParams[self.args_key])
else:
self.extra_args = extra_args
if metadata is None:
self.metadata = dict()
else:
self.metadata = metadata
@property
def frame_size(self):
'''A tuple ``(width, height)`` in pixels of a movie frame.'''
w, h = self.fig.get_size_inches()
return int(w * self.dpi), int(h * self.dpi)
def _adjust_frame_size(self):
if self.codec == 'h264':
wo, ho = self.fig.get_size_inches()
w, h = adjusted_figsize(wo, ho, self.dpi, 2)
if not (wo, ho) == (w, h):
self.fig.set_size_inches(w, h, forward=True)
verbose.report('figure size (inches) has been adjusted '
'from %s x %s to %s x %s' % (wo, ho, w, h),
level='helpful')
else:
w, h = self.fig.get_size_inches()
verbose.report('frame size in pixels is %s x %s' % self.frame_size,
level='debug')
return w, h
def setup(self, fig, outfile, dpi=None):
'''
Perform setup for writing the movie file.
Parameters
----------
fig : matplotlib.figure.Figure
The figure object that contains the information for frames
outfile : string
The filename of the resulting movie file
dpi : int, optional
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file. Default is fig.dpi.
'''
self.outfile = outfile
self.fig = fig
if dpi is None:
dpi = self.fig.dpi
self.dpi = dpi
self._w, self._h = self._adjust_frame_size()
# Run here so that grab_frame() can write the data to a pipe. This
# eliminates the need for temp files.
self._run()
def _run(self):
# Uses subprocess to call the program for assembling frames into a
# movie file. *args* returns the sequence of command line arguments
# from a few configuration options.
command = self._args()
if verbose.ge('debug'):
output = sys.stdout
else:
output = subprocess.PIPE
verbose.report('MovieWriter.run: running command: %s' %
' '.join(command))
self._proc = subprocess.Popen(command, shell=False,
stdout=output, stderr=output,
stdin=subprocess.PIPE,
creationflags=subprocess_creation_flags)
def finish(self):
'''Finish any processing for writing the movie.'''
self.cleanup()
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the `savefig`
command that saves the figure.
'''
verbose.report('MovieWriter.grab_frame: Grabbing frame.',
level='debug')
try:
# re-adjust the figure size in case it has been changed by the
# user. We must ensure that every frame is the same size or
# the movie will not save correctly.
self.fig.set_size_inches(self._w, self._h)
# Tell the figure to save its data to the sink, using the
# frame format and dpi.
self.fig.savefig(self._frame_sink(), format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
except (RuntimeError, IOError) as e:
out, err = self._proc.communicate()
verbose.report('MovieWriter -- Error '
'running proc:\n%s\n%s' % (out, err),
level='helpful')
raise IOError('Error saving animation to file (cause: {0}) '
'Stdout: {1} StdError: {2}. It may help to re-run '
'with --verbose-debug.'.format(e, out, err))
def _frame_sink(self):
'''Returns the place to which frames should be written.'''
return self._proc.stdin
def _args(self):
'''Assemble list of utility-specific command-line arguments.'''
        raise NotImplementedError("args needs to be implemented by subclass.")
def cleanup(self):
'''Clean-up and collect the process used to write the movie file.'''
out, err = self._proc.communicate()
self._frame_sink().close()
verbose.report('MovieWriter -- '
'Command stdout:\n%s' % out, level='debug')
verbose.report('MovieWriter -- '
'Command stderr:\n%s' % err, level='debug')
@classmethod
def bin_path(cls):
'''
Returns the binary path to the commandline tool used by a specific
subclass. This is a class method so that the tool can be looked for
before making a particular MovieWriter subclass available.
'''
return str(rcParams[cls.exec_key])
@classmethod
def isAvailable(cls):
'''
Check to see if a MovieWriter subclass is actually available by
running the commandline tool.
'''
bin_path = cls.bin_path()
if not bin_path:
return False
try:
p = subprocess.Popen(
bin_path,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
creationflags=subprocess_creation_flags)
return cls._handle_subprocess(p)
except OSError:
return False
@classmethod
def _handle_subprocess(cls, process):
process.communicate()
return True
class FileMovieWriter(MovieWriter):
'''`MovieWriter` for writing to individual files and stitching at the end.
This must be sub-classed to be useful.
'''
def __init__(self, *args, **kwargs):
MovieWriter.__init__(self, *args, **kwargs)
self.frame_format = rcParams['animation.frame_format']
def setup(self, fig, outfile, dpi=None, frame_prefix='_tmp',
clear_temp=True):
'''Perform setup for writing the movie file.
Parameters
----------
fig : matplotlib.figure.Figure
The figure to grab the rendered frames from.
outfile : str
The filename of the resulting movie file.
dpi : number, optional
The dpi of the output file. This, with the figure size,
controls the size in pixels of the resulting movie file.
Default is fig.dpi.
frame_prefix : str, optional
The filename prefix to use for temporary files. Defaults to
``'_tmp'``.
clear_temp : bool, optional
If the temporary files should be deleted after stitching
the final result. Setting this to ``False`` can be useful for
debugging. Defaults to ``True``.
'''
self.fig = fig
self.outfile = outfile
if dpi is None:
dpi = self.fig.dpi
self.dpi = dpi
self._adjust_frame_size()
self.clear_temp = clear_temp
self.temp_prefix = frame_prefix
self._frame_counter = 0 # used for generating sequential file names
self._temp_names = list()
self.fname_format_str = '%s%%07d.%s'
@property
def frame_format(self):
'''
Format (png, jpeg, etc.) to use for saving the frames, which can be
decided by the individual subclasses.
'''
return self._frame_format
@frame_format.setter
def frame_format(self, frame_format):
if frame_format in self.supported_formats:
self._frame_format = frame_format
else:
self._frame_format = self.supported_formats[0]
def _base_temp_name(self):
# Generates a template name (without number) given the frame format
# for extension and the prefix.
return self.fname_format_str % (self.temp_prefix, self.frame_format)
def _frame_sink(self):
# Creates a filename for saving using the basename and the current
# counter.
fname = self._base_temp_name() % self._frame_counter
# Save the filename so we can delete it later if necessary
self._temp_names.append(fname)
verbose.report(
'FileMovieWriter.frame_sink: saving frame %d to fname=%s' %
(self._frame_counter, fname),
level='debug')
self._frame_counter += 1 # Ensures each created name is 'unique'
# This file returned here will be closed once it's used by savefig()
# because it will no longer be referenced and will be gc-ed.
return open(fname, 'wb')
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the `savefig`
command that saves the figure.
'''
# Overloaded to explicitly close temp file.
verbose.report('MovieWriter.grab_frame: Grabbing frame.',
level='debug')
try:
# Tell the figure to save its data to the sink, using the
# frame format and dpi.
with self._frame_sink() as myframesink:
self.fig.savefig(myframesink, format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
except RuntimeError:
out, err = self._proc.communicate()
verbose.report('MovieWriter -- Error '
'running proc:\n%s\n%s' % (out,
err), level='helpful')
raise
def finish(self):
# Call run here now that all frame grabbing is done. All temp files
# are available to be assembled.
self._run()
MovieWriter.finish(self) # Will call clean-up
# Check error code for creating file here, since we just run
# the process here, rather than having an open pipe.
if self._proc.returncode:
try:
stdout = [s.decode() for s in self._proc._stdout_buff]
stderr = [s.decode() for s in self._proc._stderr_buff]
verbose.report("MovieWriter.finish: stdout: %s" % stdout,
level='helpful')
verbose.report("MovieWriter.finish: stderr: %s" % stderr,
level='helpful')
except Exception as e:
pass
msg = ('Error creating movie, return code: ' +
str(self._proc.returncode) +
' Try setting mpl.verbose.set_level("helpful")')
raise RuntimeError(msg)
def cleanup(self):
MovieWriter.cleanup(self)
# Delete temporary files
if self.clear_temp:
verbose.report(
'MovieWriter: clearing temporary fnames=%s' %
str(self._temp_names),
level='debug')
for fname in self._temp_names:
os.remove(fname)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase(object):
'''Mixin class for FFMpeg output.
To be useful this must be multiply-inherited from with a
    `MovieWriter` sub-class.
'''
exec_key = 'animation.ffmpeg_path'
args_key = 'animation.ffmpeg_args'
@property
def output_args(self):
args = ['-vcodec', self.codec]
# For h264, the default format is yuv444p, which is not compatible
# with quicktime (and others). Specifying yuv420p fixes playback on
        # iOS, as well as HTML5 video in firefox and safari (on both Win and
# OSX). Also fixes internet explorer. This is as of 2015/10/29.
if self.codec == 'h264' and '-pix_fmt' not in self.extra_args:
args.extend(['-pix_fmt', 'yuv420p'])
# The %dk adds 'k' as a suffix so that ffmpeg treats our bitrate as in
# kbps
if self.bitrate > 0:
args.extend(['-b', '%dk' % self.bitrate])
if self.extra_args:
args.extend(self.extra_args)
for k, v in six.iteritems(self.metadata):
args.extend(['-metadata', '%s=%s' % (k, v)])
return args + ['-y', self.outfile]
@classmethod
def _handle_subprocess(cls, process):
_, err = process.communicate()
# Ubuntu 12.04 ships a broken ffmpeg binary which we shouldn't use
# NOTE : when removed, remove the same method in AVConvBase.
if 'Libav' in err.decode():
return False
return True
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(FFMpegBase, MovieWriter):
'''Pipe-based ffmpeg writer.
Frames are streamed directly to ffmpeg via a pipe and written in a single
pass.
'''
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a pipe.
args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
'-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
'-r', str(self.fps)]
# Logging is quieted because subprocess.PIPE has limited buffer size.
if not verbose.ge('debug'):
args += ['-loglevel', 'quiet']
args += ['-i', 'pipe:'] + self.output_args
return args
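# For illustration: constructing the pipe-based writer above explicitly and
# handing it to ``Animation.save``. This assumes the ffmpeg binary pointed to
# by the 'animation.ffmpeg_path' rcParam exists; ``anim`` is expected to be an
# Animation built elsewhere, and the metadata and file name are arbitrary.
def _example_save_with_ffmpeg_writer(anim):
    writer = FFMpegWriter(fps=30, bitrate=1800,
                          metadata=dict(title='sketch', artist='example'))
    anim.save('sketch.mp4', writer=writer)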
# Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FFMpegBase, FileMovieWriter):
'''File-based ffmpeg writer.
Frames are written to temporary files on disk and then stitched
together at the end.
'''
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a collection of temp images
return [self.bin_path(), '-r', str(self.fps),
'-i', self._base_temp_name(),
'-vframes', str(self._frame_counter)] + self.output_args
# Base class of avconv information. AVConv has identical arguments to
# FFMpeg
class AVConvBase(FFMpegBase):
'''Mixin class for avconv output.
To be useful this must be multiply-inherited from with a
    `MovieWriter` sub-class.
'''
exec_key = 'animation.avconv_path'
args_key = 'animation.avconv_args'
# NOTE : should be removed when the same method is removed in FFMpegBase.
@classmethod
def _handle_subprocess(cls, process):
return MovieWriter._handle_subprocess(process)
# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
'''Pipe-based avconv writer.
Frames are streamed directly to avconv via a pipe and written in a single
pass.
'''
# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
'''File-based avconv writer.
Frames are written to temporary files on disk and then stitched
together at the end.
'''
# Base class of mencoder information. Contains configuration key information
# as well as arguments for controlling *output*
class MencoderBase(object):
exec_key = 'animation.mencoder_path'
args_key = 'animation.mencoder_args'
# Mencoder only allows certain keys, other ones cause the program
# to fail.
allowed_metadata = ['name', 'artist', 'genre', 'subject', 'copyright',
'srcform', 'comment']
# Mencoder mandates using name, but 'title' works better with ffmpeg.
    # If we find it, just put its value into name
def _remap_metadata(self):
if 'title' in self.metadata:
self.metadata['name'] = self.metadata['title']
@property
def output_args(self):
self._remap_metadata()
lavcopts = {'vcodec': self.codec}
if self.bitrate > 0:
lavcopts.update(vbitrate=self.bitrate)
args = ['-o', self.outfile, '-ovc', 'lavc', '-lavcopts',
':'.join(itertools.starmap('{0}={1}'.format,
lavcopts.items()))]
if self.extra_args:
args.extend(self.extra_args)
if self.metadata:
args.extend(['-info', ':'.join('%s=%s' % (k, v)
for k, v in six.iteritems(self.metadata)
if k in self.allowed_metadata)])
return args
# The message must be a single line; internal newlines cause sphinx failure.
mencoder_dep = ("Support for mencoder is only partially functional, "
"and will be removed entirely in 2.2. "
"Please use ffmpeg instead.")
@writers.register('mencoder')
class MencoderWriter(MovieWriter, MencoderBase):
@deprecated('2.0', message=mencoder_dep)
def __init__(self, *args, **kwargs):
with rc_context(rc={'animation.codec': 'mpeg4'}):
super(MencoderWriter, self).__init__(*args, **kwargs)
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(), '-', '-demuxer', 'rawvideo', '-rawvideo',
('w=%i:h=%i:' % self.frame_size +
'fps=%i:format=%s' % (self.fps,
self.frame_format))] + self.output_args
# Combine Mencoder options with temp file-based writing
@writers.register('mencoder_file')
class MencoderFileWriter(FileMovieWriter, MencoderBase):
supported_formats = ['png', 'jpeg', 'tga', 'sgi']
@deprecated('2.0', message=mencoder_dep)
def __init__(self, *args, **kwargs):
with rc_context(rc={'animation.codec': 'mpeg4'}):
super(MencoderFileWriter, self).__init__(*args, **kwargs)
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(),
'mf://%s*.%s' % (self.temp_prefix, self.frame_format),
'-frames', str(self._frame_counter), '-mf',
'type=%s:fps=%d' % (self.frame_format,
self.fps)] + self.output_args
# Base class for animated GIFs with convert utility
class ImageMagickBase(object):
'''Mixin class for ImageMagick output.
To be useful this must be multiply-inherited from with a
    `MovieWriter` sub-class.
'''
exec_key = 'animation.convert_path'
args_key = 'animation.convert_args'
@property
def delay(self):
return 100. / self.fps
@property
def output_args(self):
return [self.outfile]
@classmethod
def _init_from_registry(cls):
if sys.platform != 'win32' or rcParams[cls.exec_key] != 'convert':
return
from six.moves import winreg
for flag in (0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY):
try:
hkey = winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE,
'Software\\Imagemagick\\Current',
0, winreg.KEY_QUERY_VALUE | flag)
binpath = winreg.QueryValueEx(hkey, 'BinPath')[0]
winreg.CloseKey(hkey)
binpath += '\\convert.exe'
break
except Exception:
binpath = ''
rcParams[cls.exec_key] = rcParamsDefault[cls.exec_key] = binpath
@classmethod
def isAvailable(cls):
'''
Check to see if a ImageMagickWriter is actually available.
Done by first checking the windows registry (if applicable) and then
running the commandline tool.
'''
bin_path = cls.bin_path()
if bin_path == "convert":
cls._init_from_registry()
return super(ImageMagickBase, cls).isAvailable()
ImageMagickBase._init_from_registry()
# Note: the base classes need to be in that order to get
# isAvailable() from ImageMagickBase called and not the
# one from MovieWriter. The latter is then called by the
# former.
@writers.register('imagemagick')
class ImageMagickWriter(ImageMagickBase, MovieWriter):
'''Pipe-based animated gif.
Frames are streamed directly to ImageMagick via a pipe and written
in a single pass.
'''
def _args(self):
return ([self.bin_path(),
'-size', '%ix%i' % self.frame_size, '-depth', '8',
'-delay', str(self.delay), '-loop', '0',
'%s:-' % self.frame_format]
+ self.output_args)
# Note: the base classes need to be in that order to get
# isAvailable() from ImageMagickBase called and not the
# one from MovieWriter. The latter is then called by the
# former.
@writers.register('imagemagick_file')
class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):
'''File-based animated gif writer.
Frames are written to temporary files on disk and then stitched
together at the end.
'''
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
'%s*.%s' % (self.temp_prefix, self.frame_format)]
+ self.output_args)
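# For illustration: writing an animated GIF through the ImageMagick writers
# registered above. Passing the string 'imagemagick' lets ``Animation.save``
# build the writer from rcParams; it assumes ImageMagick's ``convert`` tool is
# installed, and ``anim``, the fps value and the file name are arbitrary.
def _example_save_gif(anim):
    anim.save('sketch.gif', writer='imagemagick', fps=10)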
# Taken directly from jakevdp's JSAnimation package at
# http://github.com/jakevdp/JSAnimation
def _included_frames(frame_list, frame_format):
"""frame_list should be a list of filenames"""
return INCLUDED_FRAMES.format(Nframes=len(frame_list),
frame_dir=os.path.dirname(frame_list[0]),
frame_format=frame_format)
def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
embedded = "\n"
for i, frame_data in enumerate(frame_list):
embedded += template.format(i, frame_format,
frame_data.replace('\n', '\\\n'))
return embedded
@writers.register('html')
class HTMLWriter(FileMovieWriter):
supported_formats = ['png', 'jpeg', 'tiff', 'svg']
args_key = 'animation.html_args'
@classmethod
def isAvailable(cls):
return True
def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None,
metadata=None, embed_frames=False, default_mode='loop',
embed_limit=None):
self.embed_frames = embed_frames
self.default_mode = default_mode.lower()
# Save embed limit, which is given in MB
if embed_limit is None:
self._bytes_limit = rcParams['animation.embed_limit']
else:
self._bytes_limit = embed_limit
# Convert from MB to bytes
self._bytes_limit *= 1024 * 1024
if self.default_mode not in ['loop', 'once', 'reflect']:
self.default_mode = 'loop'
warnings.warn("unrecognized default_mode: using 'loop'")
self._saved_frames = []
self._total_bytes = 0
self._hit_limit = False
super(HTMLWriter, self).__init__(fps, codec, bitrate,
extra_args, metadata)
def setup(self, fig, outfile, dpi, frame_dir=None):
if os.path.splitext(outfile)[-1] not in ['.html', '.htm']:
raise ValueError("outfile must be *.htm or *.html")
if not self.embed_frames:
if frame_dir is None:
                frame_dir = os.path.splitext(outfile)[0] + '_frames'
if not os.path.exists(frame_dir):
os.makedirs(frame_dir)
frame_prefix = os.path.join(frame_dir, 'frame')
else:
frame_prefix = None
super(HTMLWriter, self).setup(fig, outfile, dpi,
frame_prefix, clear_temp=False)
def grab_frame(self, **savefig_kwargs):
if self.embed_frames:
# Just stop processing if we hit the limit
if self._hit_limit:
return
suffix = '.' + self.frame_format
f = InMemory()
self.fig.savefig(f, format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
imgdata64 = encodebytes(f.getvalue()).decode('ascii')
self._total_bytes += len(imgdata64)
if self._total_bytes >= self._bytes_limit:
warnings.warn("Animation size has reached {0._total_bytes} "
"bytes, exceeding the limit of "
"{0._bytes_limit}. If you're sure you want "
"a larger animation embedded, set the "
"animation.embed_limit rc parameter to a "
"larger value (in MB). This and further frames"
" will be dropped.".format(self))
self._hit_limit = True
else:
self._saved_frames.append(imgdata64)
else:
return super(HTMLWriter, self).grab_frame(**savefig_kwargs)
def _run(self):
# make a duck-typed subprocess stand in
# this is called by the MovieWriter base class, but not used here.
class ProcessStandin(object):
returncode = 0
def communicate(self):
return '', ''
self._proc = ProcessStandin()
# save the frames to an html file
if self.embed_frames:
fill_frames = _embedded_frames(self._saved_frames,
self.frame_format)
else:
# temp names is filled by FileMovieWriter
fill_frames = _included_frames(self._temp_names,
self.frame_format)
mode_dict = dict(once_checked='',
loop_checked='',
reflect_checked='')
mode_dict[self.default_mode + '_checked'] = 'checked'
interval = 1000 // self.fps
with open(self.outfile, 'w') as of:
of.write(JS_INCLUDE)
of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex,
Nframes=len(self._temp_names),
fill_frames=fill_frames,
interval=interval,
**mode_dict))
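# For illustration: saving an animation as a standalone HTML page with the
# writer above. Embedding the frames keeps everything in a single file; the
# fps value and output name are arbitrary, and ``anim`` is assumed to be an
# Animation built elsewhere.
def _example_save_html(anim):
    html_writer = HTMLWriter(fps=20, embed_frames=True, default_mode='loop')
    anim.save('sketch.html', writer=html_writer)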
class Animation(object):
'''This class wraps the creation of an animation using matplotlib.
It is only a base class which should be subclassed to provide
needed behavior.
This class is not typically used directly.
Parameters
----------
fig : matplotlib.figure.Figure
The figure object that is used to get draw, resize, and any
other needed events.
event_source : object, optional
A class that can run a callback when desired events
are generated, as well as be stopped and started.
Examples include timers (see :class:`TimedAnimation`) and file
system notifications.
blit : bool, optional
controls whether blitting is used to optimize drawing. Defaults
to ``False``.
See Also
--------
FuncAnimation, ArtistAnimation
'''
def __init__(self, fig, event_source=None, blit=False):
self._fig = fig
# Disables blitting for backends that don't support it. This
# allows users to request it if available, but still have a
# fallback that works if it is not.
self._blit = blit and fig.canvas.supports_blit
# These are the basics of the animation. The frame sequence represents
# information for each frame of the animation and depends on how the
# drawing is handled by the subclasses. The event source fires events
# that cause the frame sequence to be iterated.
self.frame_seq = self.new_frame_seq()
self.event_source = event_source
# Instead of starting the event source now, we connect to the figure's
# draw_event, so that we only start once the figure has been drawn.
self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
# Connect to the figure's close_event so that we don't continue to
# fire events and try to draw to a deleted figure.
self._close_id = self._fig.canvas.mpl_connect('close_event',
self._stop)
if self._blit:
self._setup_blit()
def _start(self, *args):
'''
Starts interactive animation. Adds the draw frame command to the GUI
handler, calls show to start the event loop.
'''
# First disconnect our draw event handler
self._fig.canvas.mpl_disconnect(self._first_draw_id)
self._first_draw_id = None # So we can check on save
# Now do any initial draw
self._init_draw()
# Add our callback for stepping the animation and
# actually start the event_source.
self.event_source.add_callback(self._step)
self.event_source.start()
def _stop(self, *args):
# On stop we disconnect all of our events.
if self._blit:
self._fig.canvas.mpl_disconnect(self._resize_id)
self._fig.canvas.mpl_disconnect(self._close_id)
self.event_source.remove_callback(self._step)
self.event_source = None
def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
bitrate=None, extra_args=None, metadata=None, extra_anim=None,
savefig_kwargs=None):
'''Saves a movie file by drawing every frame.
Parameters
----------
filename : str
The output filename, e.g., :file:`mymovie.mp4`.
writer : :class:`MovieWriter` or str, optional
A `MovieWriter` instance to use or a key that identifies a
class to use, such as 'ffmpeg' or 'mencoder'. If ``None``,
defaults to ``rcParams['animation.writer']``.
fps : number, optional
Frames per second in the movie. Defaults to ``None``, which will use
the animation's specified interval to set the frames per second.
dpi : number, optional
Controls the dots per inch for the movie frames. This
combined with the figure's size in inches controls the size of
            the movie. If ``None``, defaults to ``rcParams['savefig.dpi']``.
codec : str, optional
The video codec to be used. Not all codecs are supported by
a given :class:`MovieWriter`. If ``None``,
default to ``rcParams['animation.codec']``.
bitrate : number, optional
Specifies the number of bits used per second in the compressed
movie, in kilobits per second. A higher number means a higher
quality movie, but at the cost of increased file size. If ``None``,
            defaults to ``rcParams['animation.bitrate']``.
extra_args : list, optional
List of extra string arguments to be passed to the
underlying movie utility. If ``None``, defaults to
``rcParams['animation.extra_args']``
metadata : Dict[str, str], optional
Dictionary of keys and values for metadata to include in
the output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
extra_anim : list, optional
Additional `Animation` objects that should be included
in the saved movie file. These need to be from the same
`matplotlib.figure.Figure` instance. Also, animation frames will
            simply be combined, so there should be a 1:1 correspondence
between the frames from the different animations.
savefig_kwargs : dict, optional
Is a dictionary containing keyword arguments to be passed
on to the `savefig` command which is called repeatedly to
save the individual frames.
Notes
-----
fps, codec, bitrate, extra_args, metadata are used to
construct a :class:`MovieWriter` instance and can only be
passed if `writer` is a string. If they are passed as
non-`None` and ``writer`` is a :class:`MovieWriter`, a
`RuntimeError` will be raised.
'''
# If the writer is None, use the rc param to find the name of the one
# to use
if writer is None:
writer = rcParams['animation.writer']
elif (not isinstance(writer, six.string_types) and
any(arg is not None
for arg in (fps, codec, bitrate, extra_args, metadata))):
raise RuntimeError('Passing in values for arguments '
'fps, codec, bitrate, extra_args, or metadata '
'is not supported when writer is an existing '
'MovieWriter instance. These should instead be '
'passed as arguments when creating the '
'MovieWriter instance.')
if savefig_kwargs is None:
savefig_kwargs = {}
# Need to disconnect the first draw callback, since we'll be doing
# draws. Otherwise, we'll end up starting the animation.
if self._first_draw_id is not None:
self._fig.canvas.mpl_disconnect(self._first_draw_id)
reconnect_first_draw = True
else:
reconnect_first_draw = False
if fps is None and hasattr(self, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / self._interval
# Re-use the savefig DPI for ours if none is given
if dpi is None:
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = self._fig.dpi
if codec is None:
codec = rcParams['animation.codec']
if bitrate is None:
bitrate = rcParams['animation.bitrate']
all_anim = [self]
if extra_anim is not None:
all_anim.extend(anim
for anim
in extra_anim if anim._fig is self._fig)
# If we have the name of a writer, instantiate an instance of the
# registered class.
if isinstance(writer, six.string_types):
if writer in writers.avail:
writer = writers[writer](fps, codec, bitrate,
extra_args=extra_args,
metadata=metadata)
else:
warnings.warn("MovieWriter %s unavailable" % writer)
try:
writer = writers[writers.list()[0]](fps, codec, bitrate,
extra_args=extra_args,
metadata=metadata)
except IndexError:
raise ValueError("Cannot save animation: no writers are "
"available. Please install "
"ffmpeg to save animations.")
verbose.report('Animation.save using %s' % type(writer),
level='helpful')
if 'bbox_inches' in savefig_kwargs:
warnings.warn("Warning: discarding the 'bbox_inches' argument in "
"'savefig_kwargs' as it may cause frame size "
"to vary, which is inappropriate for animation.")
savefig_kwargs.pop('bbox_inches')
# Create a new sequence of frames for saved data. This is different
# from new_frame_seq() to give the ability to save 'live' generated
# frame information to be saved later.
# TODO: Right now, after closing the figure, saving a movie won't work
# since GUI widgets are gone. Either need to remove extra code to
# allow for this non-existent use case or find a way to make it work.
with rc_context():
if rcParams['savefig.bbox'] == 'tight':
verbose.report("Disabling savefig.bbox = 'tight', as it "
"may cause frame size to vary, which "
"is inappropriate for animation.",
level='helpful')
rcParams['savefig.bbox'] = None
with writer.saving(self._fig, filename, dpi):
for anim in all_anim:
# Clear the initial frame
anim._init_draw()
for data in zip(*[a.new_saved_frame_seq()
for a in all_anim]):
for anim, d in zip(all_anim, data):
# TODO: See if turning off blit is really necessary
anim._draw_next_frame(d, blit=False)
writer.grab_frame(**savefig_kwargs)
# Reconnect signal for first draw if necessary
if reconnect_first_draw:
self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
self._start)
def _step(self, *args):
'''
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
'''
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = next(self.frame_seq)
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
def new_frame_seq(self):
'''Creates a new sequence of frame information.'''
# Default implementation is just an iterator over self._framedata
return iter(self._framedata)
def new_saved_frame_seq(self):
'''Creates a new sequence of saved/cached frame information.'''
# Default is the same as the regular frame sequence
return self.new_frame_seq()
def _draw_next_frame(self, framedata, blit):
# Breaks down the drawing of the next frame into steps of pre- and
# post- draw, as well as the drawing of the frame itself.
self._pre_draw(framedata, blit)
self._draw_frame(framedata)
self._post_draw(framedata, blit)
def _init_draw(self):
# Initial draw to clear the frame. Also used by the blitting code
# when a clean base is required.
pass
def _pre_draw(self, framedata, blit):
# Perform any cleaning or whatnot before the drawing of the frame.
# This default implementation allows blit to clear the frame.
if blit:
self._blit_clear(self._drawn_artists, self._blit_cache)
def _draw_frame(self, framedata):
# Performs actual drawing of the frame.
raise NotImplementedError('Needs to be implemented by subclasses to'
' actually make an animation.')
def _post_draw(self, framedata, blit):
# After the frame is rendered, this handles the actual flushing of
# the draw, which can be a direct draw_idle() or make use of the
# blitting.
if blit and self._drawn_artists:
self._blit_draw(self._drawn_artists, self._blit_cache)
else:
self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists, bg_cache):
# Handles blitted drawing, which renders only the artists given instead
# of the entire figure.
updated_ax = []
for a in artists:
# If we haven't cached the background for this axes object, do
# so now. This might not always be reliable, but it's an attempt
# to automate the process.
if a.axes not in bg_cache:
bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
a.axes.draw_artist(a)
updated_ax.append(a.axes)
# After rendering all the needed artists, blit each axes individually.
for ax in set(updated_ax):
ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists, bg_cache):
# Get a list of the axes that need clearing from the artists that
# have been drawn. Grab the appropriate saved background from the
# cache and restore.
axes = set(a.axes for a in artists)
for a in axes:
if a in bg_cache:
a.figure.canvas.restore_region(bg_cache[a])
def _setup_blit(self):
# Setting up the blit requires: a cache of the background for the
# axes
self._blit_cache = dict()
self._drawn_artists = []
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
self._post_draw(None, self._blit)
def _handle_resize(self, *args):
# On resize, we need to disable the resize event handling so we don't
# get too many events. Also stop the animation events, so that
# we're paused. Reset the cache and re-init. Set up an event handler
# to catch once the draw has actually taken place.
self._fig.canvas.mpl_disconnect(self._resize_id)
self.event_source.stop()
self._blit_cache.clear()
self._init_draw()
self._resize_id = self._fig.canvas.mpl_connect('draw_event',
self._end_redraw)
def _end_redraw(self, evt):
# Now that the redraw has happened, do the post draw flushing and
# blit handling. Then re-enable all of the original events.
self._post_draw(None, False)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._resize_id)
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
def to_html5_video(self, embed_limit=None):
'''Returns animation as an HTML5 video tag.
This saves the animation as an h264 video, encoded in base64
directly into the HTML5 video tag. This respects the rc parameters
for the writer as well as the bitrate. This also makes use of the
``interval`` to control the speed, and uses the ``repeat``
parameter to decide whether to loop.
'''
VIDEO_TAG = r'''<video {size} {options}>
<source type="video/mp4" src="data:video/mp4;base64,{video}">
Your browser does not support the video tag.
</video>'''
# Cache the rendering of the video as HTML
if not hasattr(self, '_base64_video'):
# Save embed limit, which is given in MB
if embed_limit is None:
embed_limit = rcParams['animation.embed_limit']
# Convert from MB to bytes
embed_limit *= 1024 * 1024
# First write the video to a tempfile. Set delete to False
# so we can re-open to read binary data.
with tempfile.NamedTemporaryFile(suffix='.m4v',
delete=False) as f:
# We create a writer manually so that we can get the
# appropriate size for the tag
Writer = writers[rcParams['animation.writer']]
writer = Writer(codec='h264',
bitrate=rcParams['animation.bitrate'],
fps=1000. / self._interval)
self.save(f.name, writer=writer)
# Now open and base64 encode
with open(f.name, 'rb') as video:
vid64 = encodebytes(video.read())
vid_len = len(vid64)
if vid_len >= embed_limit:
warnings.warn("Animation movie is {} bytes, exceeding "
"the limit of {}. If you're sure you want a "
"large animation embedded, set the "
"animation.embed_limit rc parameter to a "
"larger value (in MB).".format(vid_len,
embed_limit))
else:
self._base64_video = vid64.decode('ascii')
self._video_size = 'width="{}" height="{}"'.format(
*writer.frame_size)
# Now we can remove
os.remove(f.name)
# If we exceeded the size, this attribute won't exist
if hasattr(self, '_base64_video'):
# Default HTML5 options are to autoplay and display video controls
options = ['controls', 'autoplay']
# If we're set to repeat, make it loop
if hasattr(self, 'repeat') and self.repeat:
options.append('loop')
return VIDEO_TAG.format(video=self._base64_video,
size=self._video_size,
options=' '.join(options))
else:
return 'Video too large to embed.'
def to_jshtml(self, fps=None, embed_frames=True, default_mode=None):
"""Generate HTML representation of the animation"""
if fps is None and hasattr(self, '_interval'):
# Convert interval in ms to frames per second
fps = 1000 / self._interval
        # If we're not given a default mode, choose one based on the value of
# the repeat attribute
if default_mode is None:
default_mode = 'loop' if self.repeat else 'once'
if hasattr(self, "_html_representation"):
return self._html_representation
else:
# Can't open a second time while opened on windows. So we avoid
# deleting when closed, and delete manually later.
with tempfile.NamedTemporaryFile(suffix='.html',
delete=False) as f:
self.save(f.name, writer=HTMLWriter(fps=fps,
embed_frames=embed_frames,
default_mode=default_mode))
# Re-open and get content
with open(f.name) as fobj:
html = fobj.read()
# Now we can delete
os.remove(f.name)
self._html_representation = html
return html
def _repr_html_(self):
'''IPython display hook for rendering.'''
fmt = rcParams['animation.html']
if fmt == 'html5':
return self.to_html5_video()
elif fmt == 'jshtml':
return self.to_jshtml()
class TimedAnimation(Animation):
''':class:`Animation` subclass for time-based animation.
A new frame is drawn every *interval* milliseconds.
Parameters
----------
fig : matplotlib.figure.Figure
The figure object that is used to get draw, resize, and any
other needed events.
interval : number, optional
Delay between frames in milliseconds. Defaults to 200.
repeat_delay : number, optional
        If the animation is repeated, adds a delay in milliseconds
before repeating the animation. Defaults to ``None``.
repeat : bool, optional
Controls whether the animation should repeat when the sequence
of frames is completed. Defaults to ``True``.
blit : bool, optional
Controls whether blitting is used to optimize drawing. Defaults
to ``False``.
'''
def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
event_source=None, *args, **kwargs):
# Store the timing information
self._interval = interval
self._repeat_delay = repeat_delay
self.repeat = repeat
# If we're not given an event source, create a new timer. This permits
# sharing timers between animation objects for syncing animations.
if event_source is None:
event_source = fig.canvas.new_timer()
event_source.interval = self._interval
Animation.__init__(self, fig, event_source=event_source,
*args, **kwargs)
def _step(self, *args):
'''
Handler for getting events.
'''
# Extends the _step() method for the Animation class. If
# Animation._step signals that it reached the end and we want to
# repeat, we refresh the frame sequence and return True. If
# _repeat_delay is set, change the event_source's interval to our loop
# delay and set the callback to one which will then set the interval
# back.
still_going = Animation._step(self, *args)
if not still_going and self.repeat:
self._init_draw()
self.frame_seq = self.new_frame_seq()
if self._repeat_delay:
self.event_source.remove_callback(self._step)
self.event_source.add_callback(self._loop_delay)
self.event_source.interval = self._repeat_delay
return True
else:
return Animation._step(self, *args)
else:
return still_going
def _stop(self, *args):
# If we stop in the middle of a loop delay (which is relatively likely
        # given the potential pause here), remove the loop_delay callback as
# well.
self.event_source.remove_callback(self._loop_delay)
Animation._stop(self)
def _loop_delay(self, *args):
# Reset the interval and change callbacks after the delay.
self.event_source.remove_callback(self._loop_delay)
self.event_source.interval = self._interval
self.event_source.add_callback(self._step)
Animation._step(self)
class ArtistAnimation(TimedAnimation):
'''Animation using a fixed set of `Artist` objects.
Before creating an instance, all plotting should have taken place
and the relevant artists saved.
Parameters
----------
fig : matplotlib.figure.Figure
The figure object that is used to get draw, resize, and any
other needed events.
artists : list
Each list entry a collection of artists that represent what
needs to be enabled on each frame. These will be disabled for
other frames.
interval : number, optional
Delay between frames in milliseconds. Defaults to 200.
repeat_delay : number, optional
        If the animation is repeated, adds a delay in milliseconds
before repeating the animation. Defaults to ``None``.
repeat : bool, optional
Controls whether the animation should repeat when the sequence
of frames is completed. Defaults to ``True``.
blit : bool, optional
Controls whether blitting is used to optimize drawing. Defaults
to ``False``.
'''
def __init__(self, fig, artists, *args, **kwargs):
# Internal list of artists drawn in the most recent frame.
self._drawn_artists = []
# Use the list of artists as the framedata, which will be iterated
# over by the machinery.
self._framedata = artists
TimedAnimation.__init__(self, fig, *args, **kwargs)
def _init_draw(self):
# Make all the artists involved in *any* frame invisible
figs = set()
for f in self.new_frame_seq():
for artist in f:
artist.set_visible(False)
artist.set_animated(self._blit)
# Assemble a list of unique figures that need flushing
if artist.get_figure() not in figs:
figs.add(artist.get_figure())
# Flush the needed figures
for fig in figs:
fig.canvas.draw_idle()
def _pre_draw(self, framedata, blit):
'''
Clears artists from the last frame.
'''
if blit:
# Let blit handle clearing
self._blit_clear(self._drawn_artists, self._blit_cache)
else:
# Otherwise, make all the artists from the previous frame invisible
for artist in self._drawn_artists:
artist.set_visible(False)
def _draw_frame(self, artists):
# Save the artists that were passed in as framedata for the other
# steps (esp. blitting) to use.
self._drawn_artists = artists
# Make all the artists from the current frame visible
for artist in artists:
artist.set_visible(True)
class FuncAnimation(TimedAnimation):
'''
Makes an animation by repeatedly calling a function ``func``.
Parameters
----------
fig : matplotlib.figure.Figure
The figure object that is used to get draw, resize, and any
other needed events.
func : callable
The function to call at each frame. The first argument will
be the next value in ``frames``. Any additional positional
arguments can be supplied via the ``fargs`` parameter.
The required signature is::
def func(frame, *fargs) -> iterable_of_artists:
frames : iterable, int, generator function, or None, optional
Source of data to pass ``func`` and each frame of the animation
If an iterable, then simply use the values provided. If the
iterable has a length, it will override the ``save_count`` kwarg.
If an integer, then equivalent to passing ``range(frames)``
If a generator function, then must have the signature::
def gen_function() -> obj:
If ``None``, then equivalent to passing ``itertools.count``.
        In all of these cases, the values in *frames* are simply passed through
to the user-supplied *func* and thus can be of any type.
init_func : callable, optional
A function used to draw a clear frame. If not given, the
results of drawing from the first item in the frames sequence
will be used. This function will be called once before the
first frame.
If ``blit == True``, ``init_func`` must return an iterable of artists
to be re-drawn.
The required signature is::
def init_func() -> iterable_of_artists:
fargs : tuple or None, optional
Additional arguments to pass to each call to *func*.
save_count : int, optional
The number of values from *frames* to cache.
interval : number, optional
Delay between frames in milliseconds. Defaults to 200.
repeat_delay : number, optional
        If the animation is repeated, adds a delay in milliseconds
before repeating the animation. Defaults to ``None``.
repeat : bool, optional
Controls whether the animation should repeat when the sequence
of frames is completed. Defaults to ``True``.
blit : bool, optional
Controls whether blitting is used to optimize drawing. Defaults
to ``False``.
'''
def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
save_count=None, **kwargs):
if fargs:
self._args = fargs
else:
self._args = ()
self._func = func
# Amount of framedata to keep around for saving movies. This is only
# used if we don't know how many frames there will be: in the case
# of no generator or in the case of a callable.
self.save_count = save_count
# Set up a function that creates a new iterable when needed. If nothing
# is passed in for frames, just use itertools.count, which will just
# keep counting from 0. A callable passed in for frames is assumed to
# be a generator. An iterable will be used as is, and anything else
# will be treated as a number of frames.
if frames is None:
self._iter_gen = itertools.count
elif callable(frames):
self._iter_gen = frames
elif iterable(frames):
self._iter_gen = lambda: iter(frames)
if hasattr(frames, '__len__'):
self.save_count = len(frames)
else:
self._iter_gen = lambda: iter(xrange(frames))
self.save_count = frames
if self.save_count is None:
            # No save_count was given, so fall back to caching 100 frames.
self.save_count = 100
else:
# itertools.islice returns an error when passed a numpy int instead
# of a native python int (http://bugs.python.org/issue30537).
# As a workaround, convert save_count to a native python int.
self.save_count = int(self.save_count)
self._init_func = init_func
# Needs to be initialized so the draw functions work without checking
self._save_seq = []
TimedAnimation.__init__(self, fig, **kwargs)
# Need to reset the saved seq, since right now it will contain data
# for a single frame from init, which is not what we want.
self._save_seq = []
def new_frame_seq(self):
# Use the generating function to generate a new frame sequence
return self._iter_gen()
def new_saved_frame_seq(self):
# Generate an iterator for the sequence of saved data. If there are
# no saved frames, generate a new frame sequence and take the first
# save_count entries in it.
if self._save_seq:
# While iterating we are going to update _save_seq
# so make a copy to safely iterate over
self._old_saved_seq = list(self._save_seq)
return iter(self._old_saved_seq)
else:
return itertools.islice(self.new_frame_seq(), self.save_count)
def _init_draw(self):
# Initialize the drawing either using the given init_func or by
# calling the draw function with the first item of the frame sequence.
# For blitting, the init_func should return a sequence of modified
# artists.
if self._init_func is None:
self._draw_frame(next(self.new_frame_seq()))
else:
self._drawn_artists = self._init_func()
if self._blit:
if self._drawn_artists is None:
raise RuntimeError('The init_func must return a '
'sequence of Artist objects.')
for a in self._drawn_artists:
a.set_animated(self._blit)
self._save_seq = []
def _draw_frame(self, framedata):
# Save the data for potential saving of movies.
self._save_seq.append(framedata)
# Make sure to respect save_count (keep only the last save_count
# around)
self._save_seq = self._save_seq[-self.save_count:]
# Call the func with framedata and args. If blitting is desired,
# func needs to return a sequence of any artists that were modified.
self._drawn_artists = self._func(framedata, *self._args)
if self._blit:
if self._drawn_artists is None:
raise RuntimeError('The animation function must return a '
'sequence of Artist objects.')
for a in self._drawn_artists:
a.set_animated(self._blit)
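# For illustration: the typical FuncAnimation pattern described in the class
# docstring above: an init_func that draws a clean frame plus a per-frame
# update function that returns the modified artists so blitting can be used.
# All numbers and names here are arbitrary.
def _example_funcanimation():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(0, 2 * np.pi, 200)
    line, = ax.plot(x, np.sin(x))
    def init():
        ax.set_ylim(-1.1, 1.1)
        return (line,)
    def update(frame):
        line.set_ydata(np.sin(x + 0.1 * frame))
        return (line,)
    return FuncAnimation(fig, update, frames=100, init_func=init,
                         interval=50, blit=True)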
| mit |
antiface/mne-python | examples/visualization/plot_topo_channel_epochs_image.py | 22 | 1861 | """
============================================================
Visualize channel over epochs as images in sensor topography
============================================================
This will produce what are sometimes called event-related
potential / field (ERP/ERF) images.
One sensor topography plot is produced with the evoked field images from
the selected channels.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event related fields images
layout = mne.find_layout(epochs.info, 'meg') # use full layout
title = 'ERF images - MNE sample data'
mne.viz.plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title)
plt.show()
| bsd-3-clause |
stggh/PyAbel | examples/example_linbasex.py | 2 | 3486 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import os
import bz2
import matplotlib.pylab as plt
# This example demonstrates ``linbasex`` inverse Abel transform
# of a velocity-map image of photoelectrons from O2- photodetachment at 454 nm.
# Measured at The Australian National University
# J. Chem. Phys. 133, 174311 (2010) DOI: 10.1063/1.3493349
# Load image as a numpy array - numpy handles .gz, .bz2
imagefile = bz2.BZ2File('data/O2-ANU1024.txt.bz2')
IM = np.loadtxt(imagefile)
if os.environ.get('READTHEDOCS', None) == 'True':
IM = IM[::2,::2]
# the [::2, ::2] reduces the image size x1/2, decreasing processing memory load
# for the online readthedocs.org
# Image center should be mid-pixel and the image square,
# `center=convolution` takes care of this
un = [0, 2] # spherical harmonic orders
proj_angles = np.arange(0, 2*np.pi, np.pi/20) # projection angles
# adjust these parameter to 'improve' the look
smoothing = 0.9 # smoothing Gaussian 1/e width
threshold = 0.01 # exclude small amplitude Newton spheres
# no need to change these
radial_step = 1
clip = 0
# linbasex inverse Abel transform
LIM = abel.Transform(IM, method="linbasex", center="convolution",
center_options=dict(square=True),
transform_options=dict(basis_dir=None, return_Beta=True,
legendre_orders=un,
proj_angles=proj_angles,
smoothing=smoothing,
radial_step=radial_step, clip=clip,
threshold=threshold))
# angular, and radial integration - direct from `linbasex` transform
# as class attributes
radial = LIM.radial
speed = LIM.Beta[0]
anisotropy = LIM.Beta[1]
# normalize to max intensity peak i.e. max peak height = 1
speed /= speed[200:].max() # exclude transform noise near centerline of image
# plots of the analysis
fig = plt.figure(figsize=(11, 5))
ax1 = plt.subplot2grid((1, 2), (0, 0))
ax2 = plt.subplot2grid((1, 2), (0, 1))
# join 1/2 raw data : 1/2 inversion image
inv_IM = LIM.transform
cols = inv_IM.shape[1]
c2 = cols//2
vmax = IM[:, :c2-100].max()
inv_IM *= vmax/inv_IM[:, c2+100:].max()
JIM = np.concatenate((IM[:, :c2], inv_IM[:, c2:]), axis=1)
# raw data
im1 = ax1.imshow(JIM, origin='upper', aspect='auto', vmin=0, vmax=vmax)
ax1.set_xlabel('column (pixels)')
ax1.set_ylabel('row (pixels)')
ax1.set_title('VMI, inverse Abel: {:d}x{:d}'.format(*inv_IM.shape),
fontsize='small')
# Plot the 1D speed distribution and anisotropy parameter ("looks" better
# if multiplied by the intensity)
ax2.plot(radial, speed, label='speed')
ax2.plot(radial, speed*anisotropy, label=r'anisotropy $\times$ speed')
ax2.set_xlabel('radial pixel')
row, cols = IM.shape
ax2.axis(xmin=100*cols/1024, xmax=500*cols/1024, ymin=-1.5, ymax=1.8)
ax2.set_title("speed, anisotropy parameter", fontsize='small')
ax2.set_ylabel('intensity')
ax2.set_xlabel('radial coordinate (pixels)')
plt.legend(loc='best', frameon=False, labelspacing=0.1, fontsize='small')
plt.suptitle(
r'linbasex inverse Abel transform of O$_{2}{}^{-}$ electron velocity-map image',
fontsize='larger')
# Save an image of the plot
plt.savefig("plot_example_linbasex.png", dpi=100)
# Show the plots
plt.show()
| mit |
alexlee-gk/visual_dynamics | visual_dynamics/gui/realtime_plotter.py | 1 | 2853 | """
Realtime Plotter
The Realtime Plotter expects to be constantly given values to plot in realtime.
It assumes the values are an array and plots different indices at different
colors according to the spectral colormap.
"""
import matplotlib.gridspec as gridspec
import matplotlib.pylab as plt
import numpy as np
from visual_dynamics.gui.util import buffered_axis_limits
class RealtimePlotter(object):
def __init__(self, fig, gs, time_window=500, labels=None, alphas=None):
self._fig = fig
self._gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs)
self._ax = plt.subplot(self._gs[0])
self._time_window = time_window
self._labels = labels
self._alphas = alphas
self._init = False
if self._labels:
self.init(len(self._labels))
self._fig.canvas.draw()
self._fig.canvas.flush_events() # Fixes bug with Qt4Agg backend
def init(self, data_len):
"""
Initialize plots based off the length of the data array.
"""
self._t = 0
self._data_len = data_len
self._data = np.empty((0, data_len))
cm = plt.get_cmap('spectral')
self._plots = []
for i in range(data_len):
color = cm(1.0 * i / data_len)
alpha = self._alphas[i] if self._alphas is not None else 1.0
label = self._labels[i] if self._labels is not None else str(i)
self._plots.append(
self._ax.plot([], [], color=color, alpha=alpha, label=label)[0]
)
self._ax.set_xlim(0, self._time_window)
self._ax.set_ylim(0, 1)
self._ax.legend(loc='upper left', bbox_to_anchor=(0, 1.15))
self._init = True
def update(self, x):
"""
Update the plots with new data x. Assumes x is a one-dimensional array.
"""
x = np.ravel([x])
if not self._init:
self.init(x.shape[0])
assert x.shape[0] == self._data_len
x = x.reshape((1, self._data_len))
self._t += 1
self._data = np.append(self._data, x, axis=0)
t, tw = self._t, self._time_window
t0, tf = (0, t) if t < tw else (t - tw, t)
for i in range(self._data_len):
self._plots[i].set_data(np.arange(t0, tf), self._data[t0:tf, i])
x_range = (0, tw) if t < tw else (t - tw, t)
self._ax.set_xlim(x_range)
y_min, y_max = np.amin(self._data[t0:tf, :]), np.amax(self._data[t0:tf, :])
self._ax.set_ylim(buffered_axis_limits(y_min, y_max, buffer_factor=1.25))
self.draw()
def draw(self):
self._ax.draw_artist(self._ax.patch)
for plot in self._plots:
self._ax.draw_artist(plot)
self._fig.canvas.draw()
self._fig.canvas.flush_events() # Fixes bug with Qt4Agg backend
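# For illustration: a minimal sketch of driving the plotter above. Build a
# figure and a GridSpec cell, then feed one value per labelled channel each
# step; the label names and the fake data are arbitrary.
def _example_realtime_plotter():
    plt.ion()
    fig = plt.figure(figsize=(6, 4))
    gs = gridspec.GridSpec(1, 1)
    plotter = RealtimePlotter(fig, gs[0], time_window=200,
                              labels=['reward', 'loss'])
    for t in range(300):
        plotter.update(np.array([np.sin(t / 10.0), np.cos(t / 10.0)]))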
| mit |
xubenben/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
f3r/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is used to fit a sine curve with additional
noisy observations. As a result, it learns local linear regressions that
approximate the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data, i.e. it learns from the noise and overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
tdhopper/scikit-learn | sklearn/metrics/pairwise.py | 21 | 44042 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
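# Illustrative sketch (not part of the original module): the expansion used
# above, dist(x, y)^2 = <x, x> - 2 <x, y> + <y, y>, re-derived with plain
# NumPy for small dense inputs.  The helper name is hypothetical.
def _euclidean_expansion_demo():
    rng = np.random.RandomState(0)
    A, B = rng.rand(5, 3), rng.rand(4, 3)
    AA = (A ** 2).sum(axis=1)[:, np.newaxis]           # <x, x> per row of A
    BB = (B ** 2).sum(axis=1)[np.newaxis, :]           # <y, y> per row of B
    D2 = np.maximum(AA - 2 * np.dot(A, B.T) + BB, 0)   # clip round-off below 0
    return np.sqrt(D2)  # equals euclidean_distances(A, B) up to round-off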
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
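# Illustrative sketch (not part of the original module): the docstring above
# states that this routine matches argmin/min over the full distance matrix,
# only computed in memory-friendly chunks.  A small numerical check of that:
def _argmin_min_equivalence_demo():
    rng = np.random.RandomState(0)
    A, B = rng.rand(6, 4), rng.rand(9, 4)
    idx, vals = pairwise_distances_argmin_min(A, B)
    D = pairwise_distances(A, B)            # full (6, 9) distance matrix
    assert np.array_equal(idx, D.argmin(axis=1))
    assert np.allclose(vals, D.min(axis=1))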
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
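# Illustrative sketch (not part of the original module): summing the
# componentwise distances returned with sum_over_features=False over the
# feature axis recovers the usual (n_samples_X, n_samples_Y) L1 matrix.
def _manhattan_componentwise_demo():
    A = np.array([[1., 2.], [3., 4.]])
    B = np.array([[1., 2.], [0., 3.]])
    comp = manhattan_distances(A, B, sum_over_features=False)  # shape (4, 2)
    full = manhattan_distances(A, B)                           # shape (2, 2)
    assert np.allclose(comp.sum(axis=1).reshape(2, 2), full)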
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared euclidean distance
    if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
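# Illustrative sketch (not part of the original module): a numerical check of
# the identity stated in the Notes above -- on L2-normalized samples the
# cosine distance is half the squared euclidean distance.
def _paired_cosine_identity_demo():
    rng = np.random.RandomState(0)
    A, B = rng.rand(5, 3), rng.rand(5, 3)
    lhs = paired_cosine_distances(A, B)
    rhs = .5 * ((normalize(A) - normalize(B)) ** 2).sum(axis=1)
    assert np.allclose(lhs, rhs)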
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
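# Illustrative sketch (not part of the original module): a direct, loop-based
# evaluation of K(x, y) = exp(-gamma ||x - y||^2) that matches rbf_kernel on
# small dense inputs.  The helper name and gamma value are arbitrary.
def _rbf_kernel_naive_demo(gamma=0.5):
    rng = np.random.RandomState(0)
    A, B = rng.rand(4, 3), rng.rand(5, 3)
    K = np.empty((A.shape[0], B.shape[0]))
    for i, a in enumerate(A):
        for j, b in enumerate(B):
            K[i, j] = np.exp(-gamma * ((a - b) ** 2).sum())
    return K  # np.allclose(K, rbf_kernel(A, B, gamma=gamma)) holds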
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
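# Illustrative sketch (not part of the original module): on L2-normalized
# inputs cosine_similarity reduces to linear_kernel, as noted above.
def _cosine_vs_linear_demo():
    rng = np.random.RandomState(0)
    A, B = rng.rand(4, 3), rng.rand(6, 3)
    assert np.allclose(cosine_similarity(A, B),
                       linear_kernel(normalize(A), normalize(B)))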
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
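# Illustrative sketch (not part of the original module): chi2_kernel is the
# exponentiated, gamma-scaled additive kernel, exp(gamma * additive_chi2).
def _chi2_relation_demo(gamma=1.5):
    rng = np.random.RandomState(0)
    A, B = rng.rand(4, 3), rng.rand(5, 3)   # non-negative, as required
    assert np.allclose(chi2_kernel(A, B, gamma=gamma),
                       np.exp(gamma * additive_chi2_kernel(A, B)))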
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
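# Illustrative sketch (not part of the original module): three ways of calling
# pairwise_distances -- a scikit-learn metric, a scipy metric, and a callable.
def _pairwise_distances_usage_demo():
    rng = np.random.RandomState(0)
    A, B = rng.rand(4, 3), rng.rand(5, 3)
    D1 = pairwise_distances(A, B, metric="manhattan")   # scikit-learn metric
    D2 = pairwise_distances(A, B, metric="chebyshev")   # scipy metric
    D3 = pairwise_distances(A, B,
                            metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D1, D3)  # the callable re-implements manhattan
    return D1, D2, D3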
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
CMartelLML/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if an object can be
converted to a numpy array using array() is to try it interactively and
see if it works! (The Python Way).
Examples: ::
 >>> x = np.array([2, 3, 1, 0])
 >>> x = np.array([[1, 2.0], [0, 0], (1+1j, 3.)])  # note mix of tuples, lists, and types
 >>> x
 array([[ 1.+0.j,  2.+0.j], [ 0.+0.j,  0.+0.j], [ 1.+1.j,  3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
::
 >>> np.zeros((2, 3))
 array([[ 0.,  0.,  0.],
        [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that are
described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
formats with known Python libraries that read them and return numpy arrays
(there may be others that can be read and converted to numpy arrays, so check
the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but are not hard to convert
are the image formats supported by libraries like PIL (which can read and
write many image formats such as jpg, png, etc.).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
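A minimal sketch of such a round trip with tofile()/fromfile() (the file name
is only illustrative)::
 >>> a = np.arange(10, dtype=np.float64)
 >>> a.tofile('data.bin')                       # raw bytes, no header
 >>> b = np.fromfile('data.bin', dtype=np.float64)
 >>> np.array_equal(a, b)
 True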
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common use is the
many array generation functions in random that can generate arrays of
random values, along with some utility functions to generate special matrices (e.g.
diagonal).
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
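# --- Editor's note: a minimal, hedged usage sketch for LinearSVC (not part of
# the original source). The toy data and parameter values below are
# illustrative assumptions; only the public fit/predict API shown above is
# relied upon.
#
#     import numpy as np
#     from sklearn.svm import LinearSVC
#
#     X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
#     y = np.array([0, 0, 1, 1])
#     clf = LinearSVC(C=1.0, loss='squared_hinge').fit(X, y)
#     print(clf.coef_.shape)        # expected (1, 2) in the binary case
#     print(clf.predict([[0.8, 1.0]]))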
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. The epsilon-insensitive loss (standard SVR)
is selected with 'epsilon_insensitive', while 'squared_epsilon_insensitive'
selects the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVR does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
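# --- Editor's note: a hedged usage sketch for LinearSVR (added for clarity,
# not in the original source). The random data and the C and epsilon values
# are illustrative assumptions.
#
#     import numpy as np
#     from sklearn.svm import LinearSVR
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 4)
#     y = rng.randn(50)
#     reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
#     print(reg.coef_.shape)        # (4,) -- coef_ is raveled for regression
#     print(reg.predict(X[:3]))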
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples, which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to
'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
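# --- Editor's note: a hedged sketch of the ``decision_function_shape`` option
# documented above (not part of the original source). The 4-class toy data is
# an assumption, chosen so that the 'ovo' and 'ovr' output widths differ.
#
#     import numpy as np
#     from sklearn.svm import SVC
#
#     X = np.array([[0., 0.], [0., 1.], [3., 3.], [3., 4.],
#                   [6., 0.], [6., 1.], [9., 3.], [9., 4.]])
#     y = np.array([0, 0, 1, 1, 2, 2, 3, 3])
#     ovo = SVC(decision_function_shape='ovo').fit(X, y)
#     ovr = SVC(decision_function_shape='ovr').fit(X, y)
#     print(ovo.decision_function(X).shape)  # expected (8, 6): n_classes*(n_classes-1)/2
#     print(ovr.decision_function(X).shape)  # expected (8, 4): n_classes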
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to
'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
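# --- Editor's note: a hedged illustration of the ``nu`` parameter documented
# above (not part of the original source). nu is roughly an upper bound on
# the fraction of margin errors and a lower bound on the fraction of support
# vectors; the Gaussian blobs below are an illustrative assumption.
#
#     import numpy as np
#     from sklearn.svm import NuSVC
#
#     rng = np.random.RandomState(0)
#     X = np.r_[rng.randn(20, 2) - 2, rng.randn(20, 2) + 2]
#     y = np.r_[np.zeros(20), np.ones(20)]
#     clf = NuSVC(nu=0.5).fit(X, y)
#     print(clf.n_support_)   # per-class support-vector counts; their sum is
#                             # expected to be at least nu * n_samples = 20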
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the width of the
epsilon-tube: no penalty is added to the training loss for points
predicted within a distance epsilon of the actual value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
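# --- Editor's note: a hedged illustration of the epsilon-tube described above
# (not part of the original source); the 1-D regression data and parameter
# values are assumptions.
#
#     import numpy as np
#     from sklearn.svm import SVR
#
#     rng = np.random.RandomState(0)
#     X = np.sort(rng.rand(40, 1), axis=0)
#     y = np.sin(2 * np.pi * X).ravel() + 0.05 * rng.randn(40)
#     narrow = SVR(kernel='rbf', C=10.0, epsilon=0.01).fit(X, y)
#     wide = SVR(kernel='rbf', C=10.0, epsilon=0.5).fit(X, y)
#     # A wider tube tolerates larger residuals without penalty, so it usually
#     # keeps fewer support vectors:
#     print(len(narrow.support_), len(wide.support_))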
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
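# --- Editor's note: a hedged usage sketch for OneClassSVM novelty detection
# (not part of the original source). The training blob, test points, and
# nu/gamma values are illustrative assumptions.
#
#     import numpy as np
#     from sklearn.svm import OneClassSVM
#
#     rng = np.random.RandomState(0)
#     X_train = 0.3 * rng.randn(100, 2)            # points clustered near the origin
#     X_test = np.array([[0.1, 0.1], [4.0, 4.0]])  # one inlier-like, one far-away point
#     detector = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X_train)
#     print(detector.predict(X_test))              # +1 for inliers, -1 for outliers
#                                                  # (expected roughly [ 1, -1])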
| bsd-3-clause |
metpy/MetPy | setup.py | 1 | 3373 | # Copyright (c) 2008,2010,2015,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Setup script for installing MetPy."""
from __future__ import print_function
from setuptools import find_packages, setup
import versioneer
ver = versioneer.get_version()
setup(
name='MetPy',
version=ver,
description='Collection of tools for reading, visualizing and '
'performing calculations with weather data.',
long_description='The space MetPy aims for is GEMPAK '
'(and maybe NCL)-like functionality, in a way that '
'plugs easily into the existing scientific Python '
'ecosystem (numpy, scipy, matplotlib).',
url='http://github.com/Unidata/MetPy',
author='Ryan May, Patrick Marsh, Sean Arms, Eric Bruning',
author_email='[email protected]',
maintainer='MetPy Developers',
maintainer_email='[email protected]',
license='BSD',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'License :: OSI Approved :: BSD License'],
keywords='meteorology weather',
packages=find_packages(exclude=['doc', 'examples']),
package_data={'metpy.plots': ['colortable_files/*.tbl', 'nexrad_tables/*.tbl',
'fonts/*.ttf', '_static/metpy_75x75.png',
'_static/metpy_150x150.png', '_static/unidata_75x75.png',
'_static/unidata_150x150.png'],
'metpy': ['static-data-manifest.txt']},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
install_requires=['matplotlib>=2.0.0', 'numpy>=1.12.0', 'scipy>=0.17.0',
'pint', 'xarray>=0.10.7', 'enum34;python_version<"3.4"',
'contextlib2;python_version<"3.6"',
'pooch>=0.1', 'traitlets>=4.3.0', 'pandas>=0.22.0'],
extras_require={
'dev': ['ipython[all]>=3.1'],
'doc': ['sphinx>=1.8', 'sphinx-gallery>=0.4', 'doc8', 'm2r',
'netCDF4'],
'examples': ['cartopy>=0.13.1', 'matplotlib>=2.2.0', 'pyproj>=1.9.4,!=2.0.0'],
'test': ['pytest>=2.4', 'pytest-runner', 'pytest-mpl', 'pytest-flake8',
'cartopy>=0.16.0', 'flake8>3.2.0', 'flake8-builtins!=1.4.0',
'flake8-comprehensions', 'flake8-copyright',
'flake8-docstrings', 'flake8-import-order', 'flake8-mutable',
'flake8-pep3101', 'flake8-print', 'flake8-quotes', 'flake8-rst-docstrings',
'pep8-naming', 'netCDF4', 'pyproj>=1.9.4,!=2.0.0']
},
cmdclass=versioneer.get_cmdclass(),
zip_safe=True,
download_url='https://github.com/Unidata/MetPy/archive/v{}.tar.gz'.format(ver), )
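# Editor's note (not part of the original file): given the ``extras_require``
# declared above, a development install would typically look like
#     pip install -e ".[dev,doc,examples,test]"
# with the exact set of extras being the user's choice.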
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/linear_model/bayes.py | 14 | 15225 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
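# Editor's note (added comment, not in the original source): gamma_ below is
# the effective number of well-determined parameters in the evidence
# framework, and it drives the hyperparameter updates:
#   lambda_ <- (gamma_ + 2*lambda_1) / (sum(coef_**2) + 2*lambda_2)
#   alpha_  <- (n_samples - gamma_ + 2*alpha_1) / (rmse_ + 2*alpha_2)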
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
The parameters lambda (precisions of the distributions of the weights) and
alpha (precision of the distribution of the noise) are also estimated.
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
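# --- Editor's note: a hedged comparison sketch (not in the original source)
# using the two estimators defined in this module. The synthetic sparse-weight
# data is an illustrative assumption; exact coefficient values will vary.
#
#     import numpy as np
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 10)
#     true_w = np.zeros(10)
#     true_w[:3] = [1.0, -2.0, 0.5]
#     y = X.dot(true_w) + 0.1 * rng.randn(100)
#     ard = ARDRegression(compute_score=True).fit(X, y)
#     ridge = BayesianRidge(compute_score=True).fit(X, y)
#     print(np.round(ard.coef_, 2))    # irrelevant weights tend to be pruned to 0
#     print(np.round(ridge.coef_, 2))  # irrelevant weights are merely shrunk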
| bsd-3-clause |
openpathsampling/openpathsampling | docs/conf.py | 2 | 11183 | # -*- coding: utf-8 -*-
#
# OpenTIS documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 9 12:08:24 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
# we use these to get the version
import pkg_resources
import packaging.version
import openpathsampling
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('../openpathsampling/'))
#sys.path.append(os.path.abspath('_themes'))
# -- Copying examples over into the docs/examples -------------------------
try:
shutil.copytree(os.path.abspath("../examples/alanine_dipeptide_tps"),
os.path.abspath("examples/alanine_dipeptide_tps"))
except OSError:
pass # there should be a backup here....
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.ifconfig',
# 'IPython.sphinxext.ipython_console_highlighting',
# 'IPython.sphinxext.ipython_directive',
# 'matplotlib.sphinxext.plot_directive',
# 'numpydoc'
# 'sphinxcontrib.napoleon'
'sphinx.ext.napoleon',
'nbsphinx'
]
#
nbsphinx_execute = "never"
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# add pandoc directives
pandoc_from = ['markdown', 'mediawiki']
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members', 'imported-members']
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions.append('notebook_sphinxext')
extensions.append('pandoc_sphinxext')
# Numpydoc options
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenPathSampling'
copyright = u'2014-2021, David W.H. Swenson, Jan-Hendrik Prinz, John Chodera, Peter Bolhuis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = pkg_resources.get_distribution('openpathsampling').version
# The short X.Y version.
# version = packaging.version.Version(release).base_version
version = release # prefer to have the .dev0 label on 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'wip', 'guides']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_options = {
'logo_only': True
}
#html_theme_path = ['_themes']
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Activate the theme.
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
#html_theme = 'haiku'
#html_theme_path = ['_themes']
#html_theme = 'kr'
#html_theme_path = [alabaster.get_path()]
#extensions = ['alabaster']
#html_theme = 'alabaster'
#html_sidebars = {
# '**': [
# 'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
# ]
#}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logo/logo+whitetext.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# from http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
'_static/ipynb.css', # tweak output of
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenPathSamplingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'OpenPathSampling.tex', u'OpenPathSampling Documentation',
u'Jan-Hendrik Prinz, John Chodera, David W.H. Swenson, Peter Bolhuis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openpathsampling', u'OpenPathSampling Documentation',
[u'Jan-Hendrik Prinz, John Chodera, David W.H. Swenson, Peter Bolhuis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenPathSampling', u'OpenPathSampling Documentation',
u'Jan-Hendrik Prinz, John Chodera, David W.H. Swenson, Peter Bolhuis', 'OpenTIS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
kwikteam/global_superclustering | global_code/emlaunch_synthetic_test.py | 1 | 1570 | #Be sure to run
#python setup.py build_ext --inplace
#before running this script
import pickle
import numpy as np
import matplotlib.pyplot as plt
import sorting
from supercluster import *
from klustakwik2 import *
import imp  # lets you reload modules using e.g. imp.reload(sorting)
from IPython import embed
import time
from emcat import KK
from default_parameters import default_parameters
import testing_cat as tc
script_params = default_parameters.copy()
#script_params.update(
# run_monitoring_server=False,
# debug=True,
# )
picklefile = '/home/skadir/globalphy/global_superclustering/global_code/synthetic_cat.p'
pkl_file = open(picklefile,'rb')
mixture = pickle.load(pkl_file)
pkl_file.close()
#embed()
mixture_dict = mixture[0]
num_starting_clusters = 15
num_spikes = mixture_dict['superclusters'].shape[0]
initclust = tc.generate_random_initial_clustering(num_starting_clusters, num_spikes )
# superdata was previously named 'silly'
superdata = sorting.sparsify_superclusters(mixture_dict['superclusters'])
outsparse = superdata.to_sparse_data()  # the former 'outsil' variable is not needed; everything is stored within the sparse class
distdata = superdata.supercluster_distribution()
start_time = time.time()
[clust10, dic10] = superdata.clump_fine_clustering(10)
time_taken_clump = time.time()-start_time
print('Time taken for clump clustering %.2f s' %(time_taken_clump))
kk = KK(outsparse,**script_params)
#kk.cluster_from(clust10)
kk.cluster_from(initclust)
#Automatically create clust100 via
#kk.cluster_hammingmask_starts(100)
embed()
| gpl-2.0 |
reinaH/osf.io | tasks.py | 9 | 23940 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
"""Get parent path of current python binary.
"""
return os.path.dirname(sys.executable)
def bin_prefix(cmd):
"""Prefix command with current binary path.
"""
return os.path.join(get_bin_path(), cmd)
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, live=False):
"""Run the API server."""
cmd = 'python manage.py runserver {}'.format(port)
if live:
cmd += ' livereload'
run(cmd, echo=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
app = init_app()
context = {
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
context = make_shell_context()
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context)
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
platform = str(sys.platform).lower()
config = platform_configs.get(platform)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
the location given its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
`invoke mongorestore {path}/{settings.DB_NAME}`, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_host:
os.environ['SHAREJS_DB_HOST'] = db_host
if db_port:
os.environ['SHAREJS_DB_PORT'] = db_port
if db_name:
os.environ['SHAREJS_DB_NAME'] = db_name
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug"):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
run(bin_prefix(cmd))
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
def pip_install(req_file):
"""Return the proper 'pip install' command for installing the dependencies
defined in ``req_file``.
"""
cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
return cmd
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False):
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if not addons:
return
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task
def npm_bower():
print('Installing bower')
run('npm install -g bower', echo=True)
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
except subprocess.CalledProcessError as err:
raise err
# logger.warn("Error when running git describe")
return {}
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
| apache-2.0 |
aernesto/change-rate-inference | sims_learning_rate/scripts/visualize_fbdata.py | 1 | 12620 | """
The aim of this script is to visualize the data stored in the SQLite database produced by the simulations.
The questions that this script should answer are:
1- list existing fields in the DB
2- list unique values in each field
3- count how many trials exist per triplet (duration, h, SNR)
4- for a fixed triplet, compute the average difference (across trials) between the posterior means;
do the same for the posterior stdev
5- find the top 10 triplets for which the differences from the previous point are maximized
"""
import numpy as np
import dataset
import matplotlib.pyplot as plt
# Debug mode
debug = True
def printdebug(debugmode, string=None, vartuple=None):
"""
prints string, varname and var for debug purposes
:param debugmode: True or False
:param string: Custom message useful for debugging
:param vartuple: Tuple (varname, var), where:
:varname: string representing name of variable to display
:var: actual Python variable to print on screen
:return:
"""
if debugmode:
print('-------------------------')
if string is None:
pass
else:
print(string)
if vartuple is None:
pass
else:
print(vartuple[0], '=', vartuple[1])
print('-------------------------')
# get connection to SQLite database where results are stored
dbname = 'true_5.db'
tablename = 'feedback'
db = dataset.connect('sqlite:///' + dbname)
table = db[tablename]
def list_fields():
"""
:return: list existing fields in the table
"""
print(table.columns)
def list_unique(fields, limit=None):
"""
:param fields: list of strings containing field names from SQLite database
:param limit: optional maximum number of distinct values to print per field
:return: list unique values in each field from fields
"""
for field in fields:
# print('unique values from ' + field)
if limit is None:
for thisrow in table.distinct(field):
print(thisrow[field])
else:
counter = 0
for thisrow in table.distinct(field):
print(thisrow[field])
counter += 1
if counter == limit:
break
print('-----------------------------------')
def list_triplets(prints=True):
"""
:param prints: True to get printed output, False if only the count is desired
:return: prints the distinct values of each triplet (duration, h, SNR) if prints = True
returns the count of these distinct triplets
"""
result0 = db.query('SELECT trialDuration, hazardRate, SNR, COUNT(trialNumber) AS c \
FROM feedback GROUP BY trialDuration, SNR, hazardRate')
count = 0
for row in result0:
count += 1
if prints:
print('triplet:', row['trialDuration'], round(row['hazardRate'], 3), round(row['SNR'], 3))
print('count:', row['c'])
print('-----------------------------------')
printdebug(debugmode=not debug, vartuple=('nb of distinct triplets', count))
return count
def analyze_diff(typediff='new'):
"""
:param typediff: Either 'old' or 'new', depending on whether the table
analyzed was before or after true_3.db
:return: numpy array "array_results" with, as many rows as there are distinct triplets,
(trialDuration, hazardRate, SNR)
Columns are as follows:
column0: trialDuration
column1: hazardRate
column2: SNR
column3: difference of means, averaged across trials
column4: difference of stdev, averaged across trials
column5: standard deviation of the difference of means
column6: standard deviation of the difference of stdev
column7: Coefficient of variation for abs(difference of means)
column8: Coefficient of variation for abs(difference of stdev)
column9: sample size
"""
if typediff == 'old':
result1 = db.query('SELECT trialDuration, hazardRate, SNR, '
'(meanFeedback - meanNoFeedback) as meandiff, '
'(stdevFeedback - stdevNoFeedback) as stdevdiff '
'FROM feedback')
elif typediff == 'new':
result1 = db.query('SELECT trialDuration, hazardRate, SNR, meandiff, '
'stdevdiff FROM feedback')
elif typediff == 'abs':
result1 = db.query('SELECT trialDuration, hazardRate, SNR, absmeandiff AS meandiff, '
'absstdevdiff AS stdevdiff FROM feedback')
# store results in numpy array
'''
====
Recall formula for running average
(https://stackoverflow.com/questions/28820904/how-to-efficiently-compute-average-on-the-fly-moving-average)
n=1;
curAvg = 0;
loop{
curAvg = curAvg + (newNum - curAvg)/n;
n++;
}
====
Recall formula for running variance
(https://www.johndcook.com/blog/standard_deviation/)
Initialize M1 = x1 and S1 = 0.
For subsequent x's, use the recurrence formulas
Mk = Mk-1 + (xk - Mk-1)/k -- this is exactly the running average
Sk = Sk-1 + (xk - Mk-1)*(xk - Mk).
For 2 <= k <= n, the kth estimate of the variance is s^2 = Sk/(k - 1).
'''
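# Worked check of the recurrences quoted above (hedged illustration, not used below):
# for the sample x = [2.0, 4.0, 6.0],
#   k=1: M1 = 2.0,                      S1 = 0.0
#   k=2: M2 = 2.0 + (4.0-2.0)/2 = 3.0,  S2 = 0.0 + (4.0-2.0)*(4.0-3.0) = 2.0
#   k=3: M3 = 3.0 + (6.0-3.0)/3 = 4.0,  S3 = 2.0 + (6.0-3.0)*(6.0-4.0) = 8.0
# giving mean 4.0 and sample variance S3/(3-1) = 4.0, matching np.var([2, 4, 6], ddof=1).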
array_results = np.zeros((list_triplets(prints=False), 10))
array_row = -1
lasttriplet = (0, 0, 0)
nsamples = 1
run_meandiff_avg, run_meandiff_var, run_stdevdiff_avg, run_stdevdiff_var = (0, 0, 0, 0)
run_absmeandiff_avg, run_absmeandiff_var, run_absstdevdiff_avg, run_absstdevdiff_var = (0, 0, 0, 0)
ccc = 0
for row in result1:
ccc += 1
printdebug(debugmode=not debug, vartuple=("iteration, ", ccc))
newtriplet = (int(row['trialDuration']), row['hazardRate'], row['SNR'])
if newtriplet != lasttriplet:
if nsamples > 1:
nsamples -= 1 # to correct for last incorrect increment
coef = nsamples - 1
# compute CVs to store
std_absmeandiff = np.sqrt(run_meandiff_var / coef)
if run_absmeandiff_avg > 1e-6:
cv_meandiff = std_absmeandiff / run_absmeandiff_avg
else:
cv_meandiff = np.nan
std_absstdevdiff = np.sqrt(run_stdevdiff_var / coef)
if run_absstdevdiff_avg > 1e-6:
cv_stdevdiff = std_absstdevdiff / run_absstdevdiff_avg
else:
cv_stdevdiff = np.nan
# store
array_results[array_row, 3:10] = (run_meandiff_avg,
run_stdevdiff_avg,
np.sqrt(run_meandiff_var / coef),
np.sqrt(run_stdevdiff_var / coef),
cv_meandiff,
cv_stdevdiff,
nsamples)
nsamples = 1
array_row += 1
# fill in first 3 columns
array_results[array_row, 0:3] = newtriplet
lasttriplet = newtriplet
# compute running averages
mean_aux_diff = row['meandiff'] - run_meandiff_avg
var_aux_diff = row['stdevdiff'] - run_stdevdiff_avg
run_meandiff_avg += mean_aux_diff / nsamples
run_stdevdiff_avg += var_aux_diff / nsamples
mean_aux_absdiff = abs(row['meandiff']) - run_absmeandiff_avg
var_aux_absdiff = abs(row['stdevdiff']) - run_absstdevdiff_avg
run_absmeandiff_avg += mean_aux_absdiff / nsamples
run_absstdevdiff_avg += var_aux_absdiff / nsamples
# compute running variances
if nsamples > 1:
run_meandiff_var += mean_aux_diff * (row['meandiff'] - run_meandiff_avg)
run_stdevdiff_var += var_aux_diff * (row['stdevdiff'] - run_stdevdiff_avg)
run_absmeandiff_var += mean_aux_absdiff * (abs(row['meandiff']) - run_absmeandiff_avg)
run_absstdevdiff_var += var_aux_absdiff * (abs(row['stdevdiff']) - run_absstdevdiff_avg)
nsamples += 1
return array_results
def plot_hist_cv(array, lastfigure):
"""
:param array: array returned by analyze_diff()
:param lastfigure: integer to generate figure numbers
:return: two histograms for the CV of the absolute values of the differences
between posterior means; and between posterior stdevs.
"""
lastfigure += 1
plt.figure(lastfigure)
absmeans = array[:, 7]
absmeans = absmeans[~np.isnan(absmeans)]
plt.hist(absmeans, bins='auto')
plt.title('CV diff means')
plt.xlabel('CV of absolute diff between posterior means')
plt.ylabel('count out of 100')
lastfigure += 1
plt.figure(lastfigure)
absstdev = array[:, 8]
absstdev = absstdev[~np.isnan(absstdev)]
plt.hist(absstdev, bins='auto')
plt.title('CV diff std')
plt.xlabel('CV of absolute diff between posterior stdev')
plt.ylabel('count out of 100')
plt.show()
def plots1d(array, fixed_vars, lastfigure):
"""
:param array: numpy array returned by analyze_diff()
:param fixed_vars: dict with two key-value pairs.
key: one of 'SNR', 'hazardRate', 'trialDuration'
value: an appropriate value existing in the database
:param lastfigure: integer to generate figure numbers
:return: four errorbar plots for the mean (error bars represent 1 stdev) of:
- the difference between posterior means;
- the difference between posterior stdevs;
- the absolute difference between posterior means;
- the absolute difference between posterior stdevs.
"""
keylist = list(fixed_vars.keys())
valuelist = list(fixed_vars.values())
# pass array to an equivalent SQLite database on which the query may be run
# get connection to SQLite database where results are stored
db_aux = dataset.connect('sqlite:///:memory:')
table_aux = db_aux[tablename]
# fill in the database row by row from the numpy array
for array_row in np.arange(array.shape[0]):
elts = array[array_row, :]
table_aux.insert({'trialDuration': elts[0],
'hazardRate': elts[1],
'SNR': elts[2],
'meanMeandiff': elts[3],
'meanStdevdiff': elts[4],
'stdMeandiff': elts[5],
'stdStdevdiff': elts[6],
'cv1': elts[7],
'cv2': elts[8],
'nsamples': elts[9]})
# get the free variable name (indepvar)
if 'SNR' not in keylist:
indepvarname = 'SNR'
elif 'trialDuration' not in keylist:
indepvarname = 'trialDuration'
elif 'hazardRate' not in keylist:
indepvarname = 'hazardRate'
printdebug(debugmode=debug, vartuple=('indepvarname', indepvarname))
result2 = db_aux.query('SELECT meanMeandiff, stdMeandiff, meanStdevdiff, \
stdStdevdiff, {} FROM feedback WHERE {} = {} AND {} = {}'.format(indepvarname,
keylist[0],
valuelist[0],
keylist[1],
valuelist[1]))
indepvar = []
means = indepvar.copy()
stdevs = indepvar.copy()
err_means = indepvar.copy()
err_stdevs = indepvar.copy()
for row in result2:
indepvar += [row[indepvarname]]
means += [row['meanMeandiff']]
stdevs += [row['meanStdevdiff']]
err_means += [row['stdMeandiff']]
err_stdevs += [row['stdStdevdiff']]
lastfigure += 1
plt.figure(lastfigure)
plt.errorbar(indepvar, means, yerr=err_means)
plt.title("avg diff in means as fcn of " + indepvarname)
plt.xlabel(indepvarname)
lastfigure += 1
plt.figure(lastfigure)
plt.errorbar(indepvar, stdevs, yerr=err_stdevs)
plt.title("avg diff in var as fcn of " + indepvarname)
plt.xlabel(indepvarname)
plt.show()
if __name__ == "__main__":
fignum = 0
# list_fields()
# list_unique(['meandiff', 'absmeandiff'], limit=10)
# list_triplets()
simdata = analyze_diff(typediff='new')
# plot_hist_cv(simdata, fignum)
plots1d(simdata, {'trialDuration': 50, 'SNR': 0.2}, fignum)
| mit |
astroCV/astroCV | galaxy_detection/training/02_downloadfits.py | 1 | 1489 | #!/usr/bin/env python
from astroquery.sdss import SDSS
from astropy import coordinates as coords
from astropy.io import fits
import numpy as np
from PIL import Image
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from astropy.table import Table,vstack,Column,unique
import copy
import os.path
#DOWNLOAD SDSS FITS IMAGES WHERE WE FOUND AT LEAST 1 ZOO GALAXY
xid = Table.read('zoospecnewall2.data',format='ascii')
xuniq = Table.read('unique.data',format='ascii')
ngal=len(xid)
nuniq=len(xuniq)
print(ngal,nuniq)
#1586 is corrupted!
istart=0
iend=1000 #number of images to download for training (~230k)
for k in range(iend-istart):
i=k+istart
run=xuniq['run'][i]
rerun=xuniq['rerun'][i]
camcol=xuniq['camcol'][i]
field=xuniq['field'][i]
fnamer='images/%s.r.fits'%(xuniq['imagename'][i])
fnameg='images/%s.g.fits'%(xuniq['imagename'][i])
fnamei='images/%s.i.fits'%(xuniq['imagename'][i])
if os.path.isfile(fnamer):
print('i=%d file exist %s'%(i,fnamer))
if not os.path.isfile(fnamer):
im = SDSS.get_images(run=run,rerun=rerun,camcol=camcol,field=field,band=['r','g','i'],timeout=120)
if len(im) < 3:
print('i=%d no data file skip!!!!!'%(i))
if len(im) >= 3:
imr=im[0]
img=im[1]
imi=im[2]
imr.writeto(fnamer)
img.writeto(fnameg)
imi.writeto(fnamei)
print('i=%d downloaded %s'%(i,fnamer))
# if i%10==0:
# print('i=%d'%i)
| bsd-2-clause |
gviejo/ThalamusPhysio | python/main_test_mutual_information_noSWS.py | 1 | 7958 | import ternary
import numpy as np
import pandas as pd
from functions import *
import sys
from functools import reduce
from sklearn.manifold import *
from sklearn.cluster import *
from pylab import *
import _pickle as cPickle
from skimage.filters import gaussian
############################################################################################################
# LOADING DATA
############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
burstiness = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
lambdaa = pd.read_hdf("/mnt/DataGuillaume/MergedData/LAMBDA_AUTOCORR.h5")[('rem', 'b')]
lambdaa = lambdaa[np.logical_and(lambdaa>0.0,lambdaa<30.0)]
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = pd.MultiIndex.from_product([['wak', 'rem'], ['phase', 'pvalue', 'kappa']]),
data = np.hstack((theta_mod['wake'], theta_mod['rem'])))
theta = theta.dropna()
rippower = pd.read_hdf("../figures/figures_articles/figure2/power_ripples_2.h5")
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
swr_phase = pd.read_hdf("/mnt/DataGuillaume/MergedData/SWR_PHASE.h5")
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:20]
autocorr_rem = autocorr_rem[2:20]
autocorr_sws = autocorr_sws[2:20]
store_autocorr.close()
############################################################################################################
# WHICH NEURONS
############################################################################################################
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
neurons = np.intersect1d(swr.dropna(1).columns.values, autocorr_sws.dropna(1).columns.values)
neurons = np.intersect1d(neurons, fr_index)
from sklearn.decomposition import PCA
n_shuffling = 1000
shufflings = pd.DataFrame(index = np.arange(n_shuffling), columns = ['sws', 'rem', 'wak', 'wak-rem'])
det_all = pd.Series(index = ['sws', 'rem', 'wak', 'wak-rem'])
combi = { 'sws':autocorr_sws[neurons].values,
'rem':autocorr_rem[neurons].values,
'wak':autocorr_wak[neurons].values,
'wak-rem':np.vstack((autocorr_wak[neurons].values,autocorr_rem[neurons].values))
}
for k in combi:
shuffling = []
for j in range(n_shuffling):
print(k, j)
X = np.copy(swr[neurons].values.T)
Y = np.copy(combi[k]).T
Y = Y - Y.mean(1)[:,np.newaxis]
Y = Y / Y.std(1)[:,np.newaxis]
np.random.shuffle(X)
np.random.shuffle(Y)
pc_swr = PCA(n_components=10).fit_transform(X)
pc_aut = PCA(n_components=10).fit_transform(Y)
All = np.hstack((pc_swr, pc_aut))
corr = np.corrcoef(All.T)
d = np.linalg.det(corr)
shuffling.append(d)
X = np.copy(swr[neurons].values.T)
Y = np.copy(combi[k]).T
Y = Y - Y.mean(1)[:,np.newaxis]
Y = Y / Y.std(1)[:,np.newaxis]
pc_swr = PCA(n_components=10).fit_transform(X)
pc_aut = PCA(n_components=10).fit_transform(Y)
All = np.hstack((pc_swr, pc_aut))
corr = np.corrcoef(All.T)
d_swr_auto = np.linalg.det(corr)
det_all.loc[k] = d_swr_auto
shufflings.loc[:,k] = pd.Series(np.array(shuffling))
sys.exit()
store = pd.HDFStore("../figures/figures_articles_v2/figure6/determinant_corr_noSWS.h5", 'w')
store.put('det_all', det_all)
store.put('shufflings', shufflings)
store.close()
# Ecorr = np.mean(np.power(corr[0:10,10:], 2.0))
# a = pc_swr.T.dot(pc_aut)
# v = np.atleast_2d(np.std(pc_swr,0)).T.dot(np.atleast_2d(np.std(pc_aut,0)))
# c = (a/v)
# shuffling.append(np.abs(np.linalg.det(c)))
# shuffling = np.array(shuffling)
# X = np.copy(swr[neurons].values.T)
# Y = np.copy(autocorr_sws[neurons].values.T)
# pc_swr = PCA(n_components=10).fit_transform(X)
# pc_aut = PCA(n_components=10).fit_transform(Y)
# #var
# a = pc_swr.T.dot(pc_aut)
# v = np.atleast_2d(np.std(pc_swr,0)).T.dot(np.atleast_2d(np.std(pc_aut,0)))
# c = (a/v)
# real = np.abs(np.linalg.det(c))
# # per nucleus
# det = pd.DataFrame(index = nucleus, columns = ['det'])
# groups = mappings.loc[neurons].groupby('nucleus').groups
# for n in nucleus:
# X = np.copy(swr[groups[n]].values.T)
# Y = np.copy(autocorr_sws[groups[n]].values.T)
# pc_swr = PCA(n_components=10).fit_transform(X)
# pc_aut = PCA(n_components=10).fit_transform(Y)
# det.loc[n] = np.linalg.det(pc_swr.T.dot(pc_aut))
# pc_swr = PCA(n_components=10).fit_transform(X)
# pc_aut = PCA(n_components=10).fit_transform(Y)
# # 1. Det All
# All = np.hstack((pc_swr, pc_aut))
# corr = np.corrcoef(All.T)
# real = np.linalg.det(corr)
# Ecorr = np.mean(np.power(corr[0:10,10:], 2.0))
# shuffling = []
# for i in range(200):
# X = np.copy(swr[neurons].values.T)
# Y = np.copy(autocorr_sws[neurons].values.T)
# np.random.shuffle(X)
# # np.random.shuffle(Y)
# pc_swr = PCA(n_components=10).fit_transform(X)
# pc_aut = PCA(n_components=10).fit_transform(Y)
# All = np.hstack((pc_swr, pc_aut))
# corr = np.corrcoef(All.T)
# detall = np.linalg.det(corr)
# shuffling.append(detall)
# axvline(real)
# hist(shuffling, 10)
# # for n in det.index: axvline(det.loc[n].values[0], label = n)
# # legend()
# show()
# shuffling.append(np.abs(np.linalg.det(c)))
# shuffling = np.array(shuffling)
# X = np.copy(swr[neurons].values.T)
# Y = np.copy(autocorr_sws[neurons].values.T)
# pc_swr = PCA(n_components=10).fit_transform(X)
# pc_aut = PCA(n_components=10).fit_transform(Y)
# #var
# a = pc_swr.T.dot(pc_aut)
# v = np.atleast_2d(np.std(pc_swr,0)).T.dot(np.atleast_2d(np.std(pc_aut,0)))
# c = (a/v)
# real = np.abs(np.linalg.det(c))
# a = pc_swr.T.dot(pc_aut)
# v = np.atleast_2d(np.std(pc_swr,0)).T.dot(np.atleast_2d(np.std(pc_aut,0)))
# M = (a/v)
# ############################################################################################################
# # MUTUAL INFORMATION
# ############################################################################################################
# from sklearn.metrics import *
# X = burstiness.loc[tokeep,'sws'].values
# Y = theta.loc[tokeep,('rem','kappa')].values
# Xc = np.digitize(X, np.linspace(X.min(), X.max()+0.001, 20))
# Yc = np.digitize(Y, np.linspace(Y.min(), Y.max()+0.001, 20))
# mutual_info_score(Xc, Yc)
# mis = pd.DataFrame(index = nucleus, columns = ['b/k'])
# for k, n in mappings.loc[tokeep].groupby('nucleus').groups.items():
# X = burstiness.loc[n,'sws'].values
# Y = theta.loc[n,('rem','kappa')].values
# Xc = np.digitize(X, np.linspace(X.min(), X.max()+0.001, 20))
# Yc = np.digitize(Y, np.linspace(Y.min(), Y.max()+0.001, 20))
# mis.loc[k] = adjusted_mutual_info_score(Xc, Yc)
# mis = mis.sort_values('b/k')
# plot(mis)
# show() | gpl-3.0 |
xyguo/scikit-learn | sklearn/neighbors/regression.py | 31 | 10999 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
arvind-rs/computer_vision | mean_shift_segmentation/mean_shift_segmentation.py | 1 | 2964 | #!/usr/bin/python
#Implementing image segmentation using mean shift clustering approach.
#Author: Arvind RS
#Date: 10/21/2016
import numpy as np, scipy.misc, scipy.signal
import time, os, math, sys
import matplotlib.pyplot as plt
#Algorithm:
#1. Load the input image into a variable X.
#2. For each pixel x in X:
#2.a Find all the neighbours of x as neighbours[].
#2.b calculate the mean shift for x and its neighbours.
#2.c assign the calculated mean-shift value to x
#3. write X to the filesystem.
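# Hedged numeric illustration of step 2.b (values chosen for illustration, not the
# script's defaults): with distance hr = 2 and sigma = 2, a pixel of intensity
# x = 10 whose neighbourhood intensities are [8, 10, 11] gets kernel weights
# exp(-5*((|n - x|)/sigma)**2) = [exp(-5), 1, exp(-1.25)] ~= [0.0067, 1.0, 0.2865],
# so the mean-shift update is (0.0067*8 + 1.0*10 + 0.2865*11) / (0.0067 + 1.0 + 0.2865)
# ~= 10.21, i.e. the intensity is pulled toward the denser cluster around 10-11.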
def load_image(filename):
#Load the image
I = scipy.misc.imread(filename)
return I
def get_euclidean_distance_vectorized(X,x):
#function to compute the element-wise euclidean distance between each entry of X and the point x
return np.sqrt((X - x)**2)
def get_neighbours(x,X,distance):
#function to extract the points whose intensities are within the neighbourhood of the current point's intensity
neighbours = []
distance_vector = get_euclidean_distance_vectorized(X,x)
neighbours = np.extract(distance_vector<=distance,X)
return neighbours
def apply_gaussian_kernel_vectorized(distance_vector,sigma):
#function that appies the gaussian kernel and returns the value
temp = distance_vector / sigma
return (np.exp(-5*(temp)**2))
def mean_shift(image,sigma,distance,N):
#apply mean shift algorithm
shape = image.shape
print shape,sigma,distance,N
X = image.reshape([-1])
print X.shape
X_copy = np.copy(X)
for iteration in range(N):
for i,x in enumerate(X_copy):
#find the neighbours around x
neighbours = get_neighbours(x,X,distance)
#calculate the mean shift for the neighbours
numerator = 0
denominator = 0
distance_vector = get_euclidean_distance_vectorized(neighbours,x)
weights = apply_gaussian_kernel_vectorized(distance_vector,sigma)
numerator = np.sum(weights * neighbours)
denominator = np.sum(weights)
mean_shift_value = numerator / denominator
#update x value with mean shift value
X_copy[i] = mean_shift_value
O = X_copy.reshape(shape)
return O
def save_image(filename,image):
#function to save the output to the filesystem
scipy.misc.imsave(filename,image)
def main(filename):
#main function
current_path = os.getcwd()
#initializing values
file_path = current_path + "/" + filename
#load the image into a matrix I
I = load_image(file_path)
print I
#decided on this hs value by using the estimate_bandwidth() from sklearn.cluster
hs = 10.86
hr = 5
no_of_iterations = 1
print "running mean shift algorithm..."
O = mean_shift(I,hs,hr,no_of_iterations)
print O
print "saving output to file..."
filename = filename.replace(".jpg","")
save_image(filename+"_output.jpg",O)
if __name__ == "__main__":
start_time = time.time()
if len(sys.argv) < 2:
print "Insufficient arguments. Exiting!"
exit(0)
arg_input = sys.argv[1]
#filename = "1.jpg"
filename = arg_input
main(filename)
end_time = time.time()
print "Runtime : "+str((end_time - start_time) / 60)+" minutes"
| mit |
pombredanne/dask | dask/array/tests/test_percentiles.py | 6 | 1989 | import pytest
pytest.importorskip('numpy')
from dask.utils import skip
import dask.array as da
from dask.array.percentile import _percentile
import dask
import numpy as np
def eq(a, b):
if isinstance(a, da.Array):
a = a.compute(get=dask.get)
if isinstance(b, da.Array):
b = b.compute(get=dask.get)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def test_percentile():
d = da.ones((16,), chunks=(4,))
assert eq(da.percentile(d, [0, 50, 100]), [1, 1, 1])
x = np.array([0, 0, 5, 5, 5, 5, 20, 20])
d = da.from_array(x, chunks=(3,))
assert eq(da.percentile(d, [0, 50, 100]), [0, 5, 20])
assert same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50, 100]))
assert not same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50]))
x = np.array(['a', 'a', 'd', 'd', 'd', 'e'])
d = da.from_array(x, chunks=(3,))
assert eq(da.percentile(d, [0, 50, 100]), ['a', 'd', 'e'])
@skip
def test_percentile_with_categoricals():
try:
import pandas as pd
except ImportError:
return
x0 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
x1 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
dsk = {('x', 0): x0, ('x', 1): x1}
x = da.Array(dsk, 'x', chunks=((6, 6),))
p = da.percentile(x, [50])
assert (p.compute().categories == x0.categories).all()
assert (p.compute().codes == [0]).all()
assert same_keys(da.percentile(x, [50]),
da.percentile(x, [50]))
def test_percentiles_with_empty_arrays():
x = da.ones(10, chunks=((5, 0, 5),))
assert da.percentile(x, [10, 50, 90]).compute().tolist() == [1, 1, 1]
| bsd-3-clause |
Srisai85/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |