repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
jnarhan/Breast_Cancer | src/models/dwdii_bc_model_helper.py | 1 | 12893 | #
# Author: Daniel Dittenhafer
#
# Created: Mar 14, 2017
#
# Description: Model Helper Functions
#
#
__author__ = 'Daniel Dittenhafer'
import collections
import csv
import os
import random
import sys
import gc
import itertools
from decimal import *
from scipy import misc
from scipy import ndimage
import numpy as np
import cv2
import matplotlib.cm as cm
import matplotlib.pyplot as plt
NDX_NAME = 0
NDX_TYPE = 1
NDX_ABTYPE = 2
NDX_SCANNER = 3
NDX_SUBFOLDER = 4
NDX_PATHOLOGY = 5
def load_training_metadata(metadataFile,
balanceViaRemoval = False,
verbose=False,
exclude = ['unproven', 'pathology', 'benign_without_callback'],
normalVsAbnormal=False):
""" Loads the designated meta data optionally balancing the data by removing heavily weighted category entries.
3 result sets are returned:
1) Dictionary where key = filename and value = label (normal, benign, malignant)
2) Dictionary where key = filename and value = the sub folder the image resides in
3) Dictionary where key = label (normal, benign, etc) and value = count of images in category.
:param metadataFile:
:param balanceViaRemoval:
:param verbose:
:param exclude:
:return:
"""
# Load the metadata CSV and build the label, sub folder and count dictionaries
abnormalList = ["benign", "malignant"]
bcDict = {}
bcMetaDict = {}
bcCounts = collections.defaultdict(int)
with open(metadataFile, 'r') as csvfile:
bcCsv = csv.reader(csvfile)
headers = bcCsv.next()
for row in bcCsv:
subfld = row[NDX_SUBFOLDER]
patho = row[NDX_PATHOLOGY].lower()
if patho == "":
patho = "normal"
if patho in exclude:
pass
else:
if normalVsAbnormal and (patho in abnormalList):
patho = "abnormal"
# Load into our result sets
bcDict[row[0]] = patho
bcMetaDict[row[0]] = (subfld)
bcCounts[patho] += 1
if verbose:
print "Raw Balance"
print "----------------"
for e in bcCounts:
print e, bcCounts[e]
if balanceViaRemoval:
balanaceViaRemoval(bcCounts, bcDict, factor=1.0)
if verbose:
print
print "After Balancing"
print "----------------"
for e in bcCounts:
print e, bcCounts[e]
return bcDict, bcMetaDict, bcCounts
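# Editor's note: a minimal usage sketch of load_training_metadata (not part of the
# original module); the CSV path below is a hypothetical placeholder.
def _example_load_training_metadata(metadata_csv="data/bc_metadata.csv"):
    """Load the metadata, balance by removal, and show the class counts."""
    labels, meta, counts = load_training_metadata(metadata_csv,
                                                  balanceViaRemoval=True,
                                                  verbose=False)
    for category in counts:
        print category, counts[category]
    return labels, meta, counts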
def balanaceViaRemoval(emoCounts, emoDict, depth = 0, factor = 1.50):
if(depth >= 2):
return
# First get mean items per category
sum = len(emoDict)
avgE = sum / len(emoCounts)
theshold = avgE * factor
if depth == 0:
print "balanaceViaRemoval.avgE: " + str(avgE)
print "balanaceViaRemoval.theshold: " + str(theshold)
# Determine categories for balancing.
toBeBalanced = []
for e in emoCounts.keys():
if emoCounts[e] > theshold:
toBeBalanced.append(e)
# iterate over categories to be balanced and do balancing.
for b in toBeBalanced:
candidatesForRemoval = []
for f in emoDict.keys():
if emoDict[f] == b:
candidatesForRemoval.append(f)
random.shuffle(candidatesForRemoval)
candidatesForRemoval = candidatesForRemoval[avgE:]
for c in candidatesForRemoval:
del emoDict[c]
emoCounts[b] = avgE
balanaceViaRemoval(emoCounts, emoDict, depth + 1, factor)
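# Editor's note: worked example (hypothetical counts) of the balancing above.
# With emoCounts = {'normal': 300, 'benign': 60, 'malignant': 40} and factor=1.0:
# pass 1: 400 entries / 3 categories -> avgE = 133, threshold = 133.0, so 'normal'
# is randomly trimmed from 300 down to 133 entries.
# pass 2 (depth 1): 233 entries / 3 categories -> avgE = 77, threshold = 77.0, so
# 'normal' is trimmed again to 77 while 'benign' (60) and 'malignant' (40) stay put.
# pass 3 (depth 2) returns immediately, leaving {'normal': 77, 'benign': 60, 'malignant': 40}.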
def bcNumerics():
emoNdx = {}
emoNdx["normal"] = 0
emoNdx["benign"] = 1
emoNdx["malignant"] = 2
return emoNdx
def numericBC():
emoNdx = bcNumerics()
ndxEmo = {}
for k in emoNdx:
ndxEmo[emoNdx[k]] = k
return ndxEmo
def bcNormVsAbnormNumerics():
emoNdx = {}
emoNdx["normal"] = 0
emoNdx["abnormal"] = 1
return emoNdx
def reverseDict(d):
ndxEmo = {}
for k in d:
ndxEmo[d[k]] = k
return ndxEmo
def load_data(metadataFile,
imagesPath,
categories = bcNumerics(),
verbose=True,
verboseFreq = 200,
maxData = None,
imgSize = (350, 350),
imgResize = None,
thesePathos = None,
normalVsAbnormal = False):
"""Helper function to load the training/test data"""
show = False
# Load the CSV meta data
emoMetaData, bcDetaDict, bcCounts = load_training_metadata(metadataFile, True, verbose=verbose, normalVsAbnormal=normalVsAbnormal)
total = len(emoMetaData)
ndx = 0
x, y = imgSize
if imgResize is not None:
x, y = imgResize
if maxData is not None:
total = maxData
# Allocate containers for the data
X_data = np.zeros([total, x, y])
Y_data = np.zeros([total, 1], dtype=np.int8)
# load the image bits based on what's in the meta data
for k in emoMetaData.keys():
if thesePathos is None or emoMetaData[k] in thesePathos:
# Load the file
filepath = os.path.join(imagesPath, bcDetaDict[k][0], k)
#filepath = filepath + ".png"
if(os.path.exists(filepath)):
img = misc.imread(filepath, flatten = True) # flatten = True?
else:
img = None
print "Not Found: " + filepath
# Only accept images that were loaded
if img is not None:
# Verbose status
if verbose and ndx % verboseFreq == 0:
msg = "{0:.4f}: {1}\r\n".format(ndx / Decimal(total), k)
sys.stdout.writelines(msg)
# Resize if desired.
if imgResize is not None:
img = misc.imresize(img, imgResize)
gc.collect()
if show:
plt.imshow(img, cmap=cm.gray)
plt.show()
X_data[ndx] = img
rawEmotion = emoMetaData[k]
emotionKey = rawEmotion.lower()
emotionNdx = categories[emotionKey]
Y_data[ndx] = emotionNdx
ndx += 1
if maxData is not None and maxData <= ndx:
break
Y_data = Y_data[:ndx]
X_data = X_data[:ndx]
X_data = X_data.astype('float32')
X_data /= 255.0
return X_data, Y_data
def to_categorical(y, nb_classes=None):
'''Convert class vector (integers from 0 to nb_classes) to binary class matrix, for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
nb_classes: total number of classes
# Returns
A binary matrix representation of the input.
'''
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
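# Editor's note: illustrative example (not part of the original module).
# to_categorical(np.array([0, 2, 1]), nb_classes=3) returns the one-hot matrix
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 1., 0.]]
# which is the layout expected by categorical_crossentropy losses.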
def getNameParts(name):
parts = name.split(".")
sideParts = parts[1].split("_")
case = parts[0]
side = sideParts[0]
return case, side
def splitTrainTestValSets(metadataFile, valCsv, testCsv, trainCsv, valSize = 100, trainPct = 0.80, seed = 20275):
"""Generates 3 CSV files containing the meta data split from the source meta data file. First a Numpy
random shuffle is performed on the data loaded from the metadataFile.
:param metadataFile: the path to the source CSV file
:param valCsv: The path to the output CSV to be overwritten by the new validation meta data.
:param testCsv: The path to the output CSV to be overwritten by the new test meta data.
:param trainCsv: The path to the output CSV to be overwritten by the new train meta data.
:param valSize: The number of data rows to pull out for validation purposes
:param trainPct: Of the remaining data rows after the validation rows have been removed, the percent of
data to separate for training purposes. After the training data is extracted, the final
remaining data is saved to the test data set.
"""
caseSides = {}
with open(metadataFile, 'r') as csvfile:
bcCsv = csv.reader(csvfile)
headers = bcCsv.next()
headers = bcCsv.next()
for row in bcCsv:
case, side = getNameParts(row[NDX_NAME])
key = "{0}-{1}".format(case, side)
# build list of case-sides
caseSides[key] = (case, side)
# Split the keys up
csKeys = caseSides.keys()
# Shuffle
np.random.seed(seed)
np.random.shuffle(csKeys)
valKeys = csKeys[0 : valSize]
remainingKeys = csKeys[valSize:]
trainNdx = int(round(len(remainingKeys) * trainPct))
trainKeys = remainingKeys[0 : trainNdx]
testKeys = remainingKeys[trainNdx:]
# split the actual meta data
with open(metadataFile, 'r') as csvfile:
with open(valCsv, 'wb') as valfile:
with open(testCsv, 'wb') as testfile:
with open(trainCsv, 'wb') as trainfile:
bcCsv = csv.reader(csvfile)
valCsv = csv.writer(valfile)
testCsv = csv.writer(testfile)
trainCsv = csv.writer(trainfile)
headers = bcCsv.next()
headers = bcCsv.next()
valCsv.writerow(headers)
testCsv.writerow(headers)
trainCsv.writerow(headers)
for row in bcCsv:
case, side = getNameParts(row[NDX_NAME])
key = "{0}-{1}".format(case, side)
if(key in valKeys):
valCsv.writerow(row)
elif (key in testKeys):
testCsv.writerow(row)
elif (key in trainKeys):
trainCsv.writerow(row)
return trainKeys, testKeys, valKeys
# for k in csKeys:
def load_mias_labeldata(metadataFile, skip_lines=102):
ld = {}
with open(metadataFile, 'r') as csvfile:
emoCsv = csv.reader(csvfile, delimiter=' ')
# skip the first `skip_lines` lines of description info
for i in range(0, skip_lines):
emoCsv.next()
for row in emoCsv:
if len(row) >= 3:
ld[row[0]] = [row[2]]
if row[2] != "NORM":
ld[row[0]].append(row[3])
return ld
# From: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{0:.4f}'.format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
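# Editor's note: minimal usage sketch for plot_confusion_matrix (not part of the
# original module); the 2x2 matrix and class names below are made up for illustration.
def _example_plot_confusion_matrix():
    example_cm = np.array([[50, 10],
                           [5, 35]])
    plot_confusion_matrix(example_cm, classes=["normal", "abnormal"],
                          normalize=True, title="Example confusion matrix")
    plt.show()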
def cleanDataSet(csvFile, imageRoot):
data = []
with open(csvFile, 'r') as csvfile:
bcCsv = csv.reader(csvfile)
headers = bcCsv.next()
for row in bcCsv:
name = row[NDX_NAME]
subfld = row[NDX_SUBFOLDER]
fullName = os.path.join(imageRoot, subfld, name)
if os.path.exists(fullName):
data.append(row)
else:
print "Not found: " + fullName
with open(csvFile + "2.csv", 'wb') as file:
dataCsv = csv.writer(file)
dataCsv.writerow(headers)
for row in data:
dataCsv.writerow(row)
def reflectY(img):
tx = [[1, 0], [0, -1]]
offset = [0, img.shape[0]]
img2 = ndimage.interpolation.affine_transform(img, tx, offset)
return img2
| mit |
stylianos-kampakis/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
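# Editor's note (not part of the original file): this setup script is normally
# invoked indirectly by scikit-learn's top-level numpy.distutils build, e.g.
# something like `python setup.py build_ext --inplace` run from the repository
# root, which compiles the libsvm/liblinear extensions configured above.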
| bsd-3-clause |
mne-tools/mne-python | mne/tests/test_cov.py | 4 | 34609 | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import itertools as itt
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info, compute_rank)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info
from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import requires_sklearn, catch_logging, assert_snr
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
"""Test properties of compute_whitener."""
raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
raw.pick_types(meg=True, eeg=True, exclude=())
if proj:
raw.apply_proj()
else:
raw.del_proj()
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw)
W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
verbose='error')
n_channels = len(raw.ch_names)
n_reduced = len(raw.ch_names)
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
# round-trip mults
round_trip = np.dot(W, C)
if pca is True:
assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
elif pca == 'white':
# Our first few rows/cols are zeroed out in the white space
assert_allclose(round_trip[-rank:, -rank:],
np.eye(rank), atol=1e-7)
else:
assert pca is False
assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
compute_covariance([epochs, epochs_2])
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
with pytest.raises(RuntimeWarning, match='transform mismatch'):
compute_covariance([epochs, epochs_2], on_mismatch='warn')
with pytest.raises(ValueError, match='Invalid value'):
compute_covariance(epochs, on_mismatch='x')
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
info = raw.info
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
with pytest.raises(ValueError, match='rank, if str'):
regularize(cov, info, rank='foo')
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=False)
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=1.)
cov_reg = regularize(cov, info, rank='full')
cov_reg_reorder = regularize(cov_reorder, info, rank='full')
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names, n_nzero = compute_whitener(
cov, info, return_rank=True)
assert whitener.shape[0] == whitener.shape[1]
whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
cov_reorder, info, return_rank=True)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener, rtol=1e-6)
assert n_nzero == n_nzero_2
# with pca
assert n_nzero < whitener.shape[0]
whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
cov, info, pca=True, return_rank=True)
assert_array_equal(w_ch_names_pca, w_ch_names)
assert n_nzero_pca == n_nzero
assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmpdir):
"""Test ad hoc cov creation and I/O."""
out_fname = tmpdir.join('test-cov.fif')
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
cov = make_ad_hoc_cov(evoked.info, std)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
cov['diag'] = False
cov._get_square()
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
def test_io_cov(tmpdir):
"""Test IO for noise covariance matrices."""
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(tmpdir.join('test-cov.fif'))
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert 'Covariance' in repr(cov)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
cov_sel.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
cov_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-cov.fif'):
write_cov(cov_badname, cov)
with pytest.warns(RuntimeWarning, match='-cov.fif'):
read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, 'empirical', 'shrunk'))
def test_cov_estimation_on_raw(method, tmpdir):
"""Test estimation from raw (typically empty room)."""
if method == 'shrunk':
try:
import sklearn # noqa: F401
except Exception as exp:
pytest.skip('sklearn is required, got %s' % (exp,))
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
method_params = dict(shrunk=dict(shrinkage=[0]))
# The pure-string uses the more efficient numpy-based method, while
# the list gets triaged to compute_covariance (should be equivalent
# but use more memory)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(
raw, tstep=None, method=method, rank='full',
method_params=method_params)
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e6)
# test equivalence with np.cov
cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
if method != 'shrunk': # can check all
off_diag = np.triu_indices(cov_np.shape[0])
else:
# We explicitly zero out off-diag entries between channel types,
# so let's just check MEG off-diag entries
off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
exclude=())))
for other in (cov_mne, cov):
assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
assert_snr(cov.data, other.data, 1e6)
# tstep=0.2 (default)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(raw, method=method, rank='full',
method_params=method_params)
assert_equal(cov.nfree, cov_mne.nfree - 120) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 170)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
assert cov_read.ch_names == cov.ch_names
assert cov_read.nfree == cov.nfree
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
rank='full', method_params=method_params)
assert cov_mne.ch_names[:5] == cov.ch_names
assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
cov = compute_raw_covariance(raw_pick, method=method, rank='full',
method_params=method_params)
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw_2, method=method,
method_params=method_params)
# no epochs found due to rejection
pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
with pytest.warns(None): # sklearn
cov = compute_raw_covariance(
raw.copy().crop(0, 10.), tstep=None, method=method,
reject=dict(eog=1000e-6), method_params=method_params,
verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with pytest.warns(RuntimeWarning, match='Too few samples'):
# "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank, tmpdir):
"""Test estimation from raw with triggers."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True).load_data()
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True,
reject=reject, preload=True)
cov = compute_covariance(epochs, keep_sample_mean=True)
cov_km = read_cov(cov_km_fname)
# adjust for nfree bug
cov_km['nfree'] -= 1
_assert_cov(cov, cov_km)
# Test with tmin and tmax (different but not too much)
cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
assert np.all(cov.data != cov_tmin_tmax.data)
err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
linalg.norm(cov_tmin_tmax.data, ord='fro'))
assert err < 0.05
# cov using a list of epochs and keep_sample_mean=True
epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject)
for ev_id in event_ids]
cov2 = compute_covariance(epochs, keep_sample_mean=True)
assert_array_almost_equal(cov.data, cov2.data)
assert cov.ch_names == cov2.ch_names
# cov with keep_sample_mean=False using a list of epochs
cov = compute_covariance(epochs, keep_sample_mean=False)
assert cov_km.nfree == cov.nfree
_assert_cov(cov, read_cov(cov_fname), nfree=False)
method_params = {'empirical': {'assume_centered': False}}
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method_params=method_params)
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method='shrunk', rank=rank)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
_assert_cov(cov, cov_read, 1e-5)
# cov with list of epochs with different projectors
epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True),
Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=False)]
# these should fail
pytest.raises(ValueError, compute_covariance, epochs)
pytest.raises(ValueError, compute_covariance, epochs, projs=None)
# these should work, but won't be equal to above
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=[])
# test new dict support
epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
proj=True, reject=reject, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs, projs=[])
pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
"""Test arithmetic with noise covariance matrices."""
cov = read_cov(cov_fname)
cov_sum = cov + cov
assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
assert_array_almost_equal(2 * cov.data, cov_sum.data)
assert cov.ch_names == cov_sum.ch_names
cov += cov
assert_array_almost_equal(cov_sum.nfree, cov.nfree)
assert_array_almost_equal(cov_sum.data, cov.data)
assert cov_sum.ch_names == cov.ch_names
def test_regularize_cov():
"""Test cov regularization."""
raw = read_raw_fif(raw_fname)
raw.info['bads'].append(raw.ch_names[0]) # test with bad channels
noise_cov = read_cov(cov_fname)
# Regularize noise cov
reg_noise_cov = regularize(noise_cov, raw.info,
mag=0.1, grad=0.1, eeg=0.1, proj=True,
exclude='bads', rank='full')
assert noise_cov['dim'] == reg_noise_cov['dim']
assert noise_cov['data'].shape == reg_noise_cov['data'].shape
assert np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08
# make sure all args are represented
assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()
def test_whiten_evoked():
"""Test whitening of evoked data."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)
###########################################################################
# Show result
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
exclude='bads', rank='full')
evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
assert np.all(mean_baseline < 1.)
assert np.all(mean_baseline > 0.2)
# degenerate
cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
def test_regularized_covariance():
"""Test unchanged data with regularized_covariance."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
data = evoked.data.copy()
# check that input data remain unchanged. gh-5698
_regularized_covariance(data)
assert_allclose(data, evoked.data, atol=1e-20)
@requires_sklearn
def test_auto_low_rank():
"""Test probabilistic low rank estimators."""
n_samples, n_features, rank = 400, 10, 5
sigma = 0.1
def get_data(n_samples, n_features, rank, sigma):
rng = np.random.RandomState(42)
W = rng.randn(n_features, n_features)
X = rng.randn(n_samples, rank)
U, _, _ = linalg.svd(W.copy())
X = np.dot(X, U[:, :rank].T)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X += rng.randn(n_samples, n_features) * sigmas
return X
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [4, 5, 6]}
cv = 3
n_jobs = 1
mode = 'factor_analysis'
rescale = 1e8
X *= rescale
est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params,
cv=cv)
assert_equal(info['best'], rank)
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [n_features + 5]}
msg = ('You are trying to estimate %i components on matrix '
'with %i features.') % (n_features + 5, n_features)
with pytest.warns(RuntimeWarning, match=msg):
_auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_sklearn
def test_compute_covariance_auto_reg(rank):
"""Test automated regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.resample(100, npad='auto') # much faster estimation
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(mag=4e-12)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
# we need a few channels for numerical reasons in PCA/FA
picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
epochs = Epochs(
raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
epochs = epochs.crop(None, 0)[:5]
method_params = dict(factor_analysis=dict(iter_n_components=[3]),
pca=dict(iter_n_components=[3]))
covs = compute_covariance(epochs, method='auto',
method_params=method_params,
return_estimators=True, rank=rank)
# make sure regularization produces structured differences
diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
off_diag_mask = np.invert(diag_mask)
for cov_a, cov_b in itt.combinations(covs, 2):
if (cov_a['method'] == 'diagonal_fixed' and
# here we have diagonal or no regularization.
cov_b['method'] == 'empirical' and rank == 'full'):
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
# but the rest is the same
assert_allclose(cov_a['data'][off_diag_mask],
cov_b['data'][off_diag_mask], rtol=1e-12)
else:
# and here we have shrinkage everywhere.
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
logliks = [c['loglik'] for c in covs]
assert np.diff(logliks).max() <= 0 # descending order
methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
if rank == 'full':
methods.extend(['factor_analysis', 'pca'])
with catch_logging() as log:
cov3 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=True, rank=rank,
verbose=True)
log = log.getvalue().split('\n')
if rank is None:
assert ' Setting small MAG eigenvalues to zero (without PCA)' in log
assert 'Reducing data rank from 10 -> 7' in log
else:
assert 'Reducing' not in log
method_names = [cov['method'] for cov in cov3]
best_bounds = [-45, -35]
bounds = [-55, -45] if rank == 'full' else best_bounds
for method in set(methods) - {'empirical', 'shrunk'}:
this_lik = cov3[method_names.index(method)]['loglik']
assert bounds[0] < this_lik < bounds[1]
this_lik = cov3[method_names.index('shrunk')]['loglik']
assert best_bounds[0] < this_lik < best_bounds[1]
this_lik = cov3[method_names.index('empirical')]['loglik']
bounds = [-110, -100] if rank == 'full' else best_bounds
assert bounds[0] < this_lik < bounds[1]
assert_equal({c['method'] for c in cov3}, set(methods))
cov4 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=False, rank=rank)
assert cov3[0]['method'] == cov4['method'] # ordering
# invalid prespecified method
pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
# invalid scalings
pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
# ignore warnings about rank mismatches: sometimes we will intentionally
# violate the computed/info assumption, such as when using SSS with
# `rank='full'`
with pytest.warns(None):
return _compute_rank_int(cov, info=info, proj=proj)
@pytest.fixture(scope='module')
def raw_epochs_events():
"""Create raw, epochs, and events for tests."""
raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)
raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank
assert raw.info['bads'] == [] # no bads
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
return (raw, epochs, events)
@requires_sklearn
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
"""Test low-rank covariance matrix estimation."""
epochs = raw_epochs_events[1]
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
methods = ('empirical', 'diagonal_fixed', 'oas')
bounds = {
'None': dict(empirical=(-15000, -5000),
diagonal_fixed=(-1500, -500),
oas=(-700, -600)),
'full': dict(empirical=(-18000, -8000),
diagonal_fixed=(-2000, -1600),
oas=(-1600, -1000)),
'info': dict(empirical=(-15000, -5000),
diagonal_fixed=(-700, -600),
oas=(-700, -600)),
}
with pytest.warns(RuntimeWarning, match='Too few samples'):
covs = compute_covariance(
epochs, method=methods, return_estimators=True, rank=rank,
verbose=True)
for cov in covs:
method = cov['method']
these_bounds = bounds[str(rank)][method]
this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
if rank == 'full' and method != 'empirical':
assert this_rank == n_ch
else:
assert this_rank == sss_proj_rank
assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
(rank, method)
@requires_sklearn
def test_low_rank_cov(raw_epochs_events):
"""Test additional properties of low rank computations."""
raw, epochs, events = raw_epochs_events
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
proj_rank = 365 # one EEG proj
with pytest.warns(RuntimeWarning, match='Too few samples'):
emp_cov = compute_covariance(epochs)
# Test equivalence with mne.cov.regularize subspace
with pytest.raises(ValueError, match='are dependent.*must equal'):
regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == proj_rank
with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
_compute_rank_int(reg_cov, info=epochs.info)
del reg_cov
with catch_logging() as log:
reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
verbose=True)
log = log.getvalue()
assert 'jointly' in log
assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
del reg_r_only_cov, reg_r_cov
# test that rank=306 is same as rank='full'
epochs_meg = epochs.copy().pick_types(meg=True)
assert len(epochs_meg.ch_names) == 306
epochs_meg.info.update(bads=[], projs=[])
cov_full = compute_covariance(epochs_meg, method='oas',
rank='full', verbose='error')
assert _cov_rank(cov_full, epochs_meg.info) == 306
with pytest.warns(RuntimeWarning, match='few samples'):
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306))
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306), verbose='error')
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
# Work with just EEG data to simplify projection / rank reduction
raw = raw.copy().pick_types(meg=False, eeg=True)
n_proj = 2
raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
n_ch = len(raw.ch_names)
rank = n_ch - n_proj - 1 # plus avg proj
assert len(raw.info['projs']) == 3
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
assert len(raw.ch_names) == n_ch
emp_cov = compute_covariance(epochs, rank='full', verbose='error')
assert _cov_rank(emp_cov, epochs.info) == rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == rank
reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_cov, epochs.info) == rank
dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
verbose='error')
assert _cov_rank(dia_cov, epochs.info) == rank
assert_allclose(dia_cov['data'], reg_cov['data'])
epochs.pick_channels(epochs.ch_names[:103])
# degenerate
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='pca')
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_sklearn
def test_cov_ctf():
"""Test basic cov computation on ctf data with/without compensation."""
raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
events = make_fixed_length_events(raw, 99999)
assert len(events) == 2
ch_names = [raw.info['ch_names'][pick]
for pick in pick_types(raw.info, meg=True, eeg=False,
ref_meg=False)]
for comp in [0, 1]:
raw.apply_gradient_compensation(comp)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0.,
method=['empirical'])
prepare_noise_cov(noise_cov, raw.info, ch_names)
raw.apply_gradient_compensation(0)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
raw.apply_gradient_compensation(1)
# TODO This next call in principle should fail.
prepare_noise_cov(noise_cov, raw.info, ch_names)
# make sure comps matrices were not removed from raw
assert raw.info['comps'], 'Comps matrices removed'
def test_equalize_channels():
"""Test equalization of channels for instances of Covariance."""
cov1 = make_ad_hoc_cov(create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.0,
ch_types='eeg'))
cov2 = make_ad_hoc_cov(create_info(['CH5', 'CH1', 'CH2'], sfreq=1.0,
ch_types='eeg'))
cov1, cov2 = equalize_channels([cov1, cov2])
assert cov1.ch_names == ['CH1', 'CH2']
assert cov2.ch_names == ['CH1', 'CH2']
def test_compute_whitener_rank():
"""Test risky rank options."""
info = read_info(ave_fname)
info = pick_info(info, pick_types(info, meg=True))
info['projs'] = []
# need a square version because the diag one takes shortcuts in
# compute_whitener (users shouldn't even need this function so it's
# private)
cov = make_ad_hoc_cov(info)._as_square()
assert len(cov['names']) == 306
_, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
assert rank == 306
assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
cov['data'][-1] *= 1e-14 # trivially rank-deficient
_, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
assert rank == 305
assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
# this should emit a warning
with pytest.warns(RuntimeWarning, match='exceeds the estimated'):
_, _, rank = compute_whitener(cov, info, rank=dict(meg=306),
return_rank=True)
assert rank == 306
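# Editor's note: a condensed sketch (not one of the original tests) of the
# covariance workflow exercised above; it reuses the same sample raw_fname and
# the compute_raw_covariance / compute_whitener imports from this module.
def _example_covariance_workflow():
    raw = read_raw_fif(raw_fname).crop(0, 10.).load_data()
    raw.pick_types(meg=True, eeg=True, exclude='bads')
    cov = compute_raw_covariance(raw, tstep=0.2, method='empirical')
    whitener, ch_names, rank = compute_whitener(cov, raw.info, return_rank=True)
    # with pca=False the whitener is square in the channel space
    assert whitener.shape == (len(ch_names), len(ch_names))
    return cov, rank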
| bsd-3-clause |
WillemWybo/SGF_formalism | demo.py | 1 | 4706 | import matplotlib.pyplot as pl
import numpy as np
import copy
import pickle
import sys
sys.setrecursionlimit(2000)
import morphologyReader as morphR
import neuronModels as neurM
import functionFitter as funF
## parameters
Veq = -65. # mV
tmax = 300. # ms
dt = .1 # ms
K = 4
## initialization #####################################################################
## Step 0: initialize the morphology
# Specify the path to an '.swc' file.
morphfile = 'morphologies/ball_and_stick_taper.swc'
# Define the ion channel distributions for dendrites and soma. Here the neuron model is
# passive.
d_distr = {'L': {'type': 'fit', 'param': [Veq, 50.], 'E': Veq, 'calctype': 'pas'}}
s_distr = {'L': {'type': 'fit', 'param': [Veq, 50.], 'E': Veq, 'calctype': 'pas'}}
# initialize a greensTree. Here, all the quantities are stored to compute the GF in the
# frequency domain (algorithm of Koch and Poggio, 1985).
greenstree = morphR.greensTree(morphfile, soma_distr=s_distr, ionc_distr=d_distr, cnodesdistr='all')
# initialize a greensFunctionCalculator using the previously created greensTree. This class
# stores all variables necessary to compute the GF in a format fit for simulation, either
# the plain time domain or with the partial fraction decomposition.
gfcalc = morphR.greensFunctionCalculator(greenstree)
gfcalc.set_impedances_logscale(fmax=7, base=10, num=200)
# Now a list of input locations needs to be defined. For the sparse reformulation, the
# first location needs to be the soma
inlocs = [ {'node': 1, 'x': .5, 'ID': 0}, {'node': 4, 'x': .5, 'ID': 1}, {'node': 5, 'x': .5, 'ID': 2},
{'node': 6, 'x': .5, 'ID': 3}, {'node': 7, 'x': .5, 'ID': 4}, {'node': 8, 'x': .5, 'ID': 5},
{'node': 9, 'x': .5, 'ID': 6}]
## Steps 1,2,3 and 4:
# find sets of nearest neighbours, computes the necessary GF kernels, then computes the
# sparse kernels and then fits the partial fraction decomposition using the VF algorithm.
alphas, gammas, pairs, Ms = gfcalc.kernelSet_sparse(inlocs, FFT=False, kernelconstants=True)
## Step 4 bis: compute the vectors that will be used in the simulation
prep = neurM.preprocessor()
mat_dict_hybrid = prep.construct_volterra_matrices_hybrid(dt, alphas, gammas, K, pprint=False)
## Examples of steps that happen within the kernelSet_sparse function
## Step 1: example to find the nearest neighbours
NNs, _ = gfcalc.greenstree.get_nearest_neighbours(inlocs, add_leaves=False, reduced=False)
## Step 2: example of finding a kernel
g_example = gfcalc.greenstree.calc_greensfunction(inlocs[0], inlocs[1], voltage=True)
## Step 4: example of computing a partial fraction decomposition
FEF = funF.fExpFitter()
alpha_example, gamma_example, pair_example, rms = FEF.fitFExp_increment(gfcalc.s, g_example, \
rtol=1e-8, maxiter=50, realpoles=False, constrained=True, zerostart=False)
# # plot the kernel example
# pl.figure('kernel example')
# pl.plot(gfcalc.s.imag, g_example.real, 'b')
# pl.plot(gfcalc.s.imag, g_example.imag, 'r')
#######################################################################################
## Simulation #########################################################################
# define a synapse and a spiketime
synapseparams = [{'node': 9, 'x': .5, 'ID': 0, 'tau1': .2, 'tau2': 3., 'E_r': 0., 'weight': 5.*1e-3}]
spiketimes = [{'ID': 0, 'spks': [10.]}]
# ion channel conductances at integration points, in this example, there is only leak which is
# already incorporated in the GF
gs_point = {inloc['ID']: {'L': 0.} for inloc in inlocs}
es_point = {inloc['ID']: {'L': -65.} for inloc in inlocs}
gcalctype_point = {inloc['ID']: {'L': 'pas'} for inloc in inlocs}
# create an SGF neuron
SGFneuron = neurM.integratorneuron(inlocs, synapseparams, [], gs_point, es_point, gcalctype_point,
E_eq=Veq, nonlinear=False)
# run the simulation
SGFres = SGFneuron.run_volterra_hybrid(tmax, dt, spiketimes, mat_dict=mat_dict_hybrid)
# run a neuron simulation for comparison
NEURONneuron = neurM.NeuronNeuron(greenstree, dt=dt, truemorph=True, factorlambda=10.)
NEURONneuron.add_double_exp_synapses(copy.deepcopy(synapseparams))
NEURONneuron.set_spiketrains(spiketimes)
NEURres = NEURONneuron.run(tdur=tmax, pprint=False)
#######################################################################################
## plot trace
pl.figure('simulation')
pl.plot(NEURres['t'], NEURres['vmsoma'], 'r-', label=r'NEURON soma')
pl.plot(NEURres['t'], NEURres[0], 'b-', label=r'NEURON syn')
pl.plot(SGFres['t'], SGFres['Vm'][0,:], 'r--', lw=1.7, label=r'SGF soma')
pl.plot(SGFres['t'], SGFres['Vm'][-1,:], 'b--', lw=1.7, label=r'SGF syn')
pl.xlabel(r'$t$ (ms)')
pl.ylabel(r'$V_m$ (mV)')
pl.legend(loc=0)
pl.show()
| mit |
xubenben/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# label 5 points and remove them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
tdhopper/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/metrics/metrics.py | 1 | 31423 | """Utilities to evaluate the predictive performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better
Function named as *_loss return a scalar value to minimize: the lower the
better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD Style.
import numpy as np
from ..utils import check_arrays
from ..utils import deprecated
def unique_labels(*lists_of_labels):
"""Extract an ordered array of unique labels"""
labels = set()
for l in lists_of_labels:
if hasattr(l, 'ravel'):
l = l.ravel()
labels |= set(l)
return np.unique(sorted(labels))
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix cm is such that cm[i, j] is equal
to the number of observations known to be in group i but predicted
to be in group j.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
estimated targets
labels : array, shape = [n_classes]
lists all labels occurring in the dataset.
If none is given, those that appear at least once
in y_true or y_pred are used.
Returns
-------
CM : array, shape = [n_classes, n_classes]
confusion matrix
References
----------
http://en.wikipedia.org/wiki/Confusion_matrix
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
if n_labels >= 15:
CM = np.zeros((n_labels, n_labels), dtype=np.long)
for yt, yp in zip(y_true, y_pred):
CM[label_to_ind[yt], label_to_ind[yp]] += 1
else:
CM = np.empty((n_labels, n_labels), dtype=np.long)
for i, label_i in enumerate(labels):
for j, label_j in enumerate(labels):
CM[i, j] = np.sum(
np.logical_and(y_true == label_i, y_pred == label_j))
return CM
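# Editor's note: a small worked example (not part of the original module). With
# y_true = np.array([0, 0, 1, 1, 2]) and y_pred = np.array([0, 1, 1, 1, 2]),
# confusion_matrix(y_true, y_pred) gives
#     [[1, 1, 0],
#      [0, 2, 0],
#      [0, 0, 1]]
# row i counts samples whose true class is i, column j counts samples predicted as j.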
def roc_curve(y_true, y_score):
"""compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
true binary labels
y_score : array, shape = [n_samples]
target scores, can either be probability estimates of
the positive class, confidence values, or binary decisions.
Returns
-------
fpr : array, shape = [>2]
False Positive Rates
tpr : array, shape = [>2]
True Positive Rates
thresholds : array, shape = [>2]
Thresholds on y_score used to compute fpr and tpr.
*Note*: Since the thresholds are sorted from low to high values,
they are reversed upon returning them to ensure they
correspond to both fpr and tpr, which are sorted in reversed order
during their calculation.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
References
----------
http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
y_true = np.ravel(y_true)
classes = np.unique(y_true)
# ROC only for binary classification
if classes.shape[0] != 2:
raise ValueError("ROC is defined for binary classification only")
y_score = np.ravel(y_score)
n_pos = float(np.sum(y_true == classes[1])) # nb of true positive
n_neg = float(np.sum(y_true == classes[0])) # nb of true negative
thresholds = np.unique(y_score)
neg_value, pos_value = classes[0], classes[1]
tpr = np.empty(thresholds.size, dtype=np.float) # True positive rate
fpr = np.empty(thresholds.size, dtype=np.float) # False positive rate
# Build tpr/fpr vector
current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0
signal = np.c_[y_score, y_true]
sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
last_score = sorted_signal[0][0]
for score, value in sorted_signal:
if score == last_score:
if value == pos_value:
current_pos_count += 1
else:
current_neg_count += 1
else:
tpr[idx] = (sum_pos + current_pos_count) / n_pos
fpr[idx] = (sum_neg + current_neg_count) / n_neg
sum_pos += current_pos_count
sum_neg += current_neg_count
current_pos_count = 1 if value == pos_value else 0
current_neg_count = 1 if value == neg_value else 0
idx += 1
last_score = score
else:
tpr[-1] = (sum_pos + current_pos_count) / n_pos
fpr[-1] = (sum_neg + current_neg_count) / n_neg
# hard decisions, add (0,0)
if fpr.shape[0] == 2:
fpr = np.array([0.0, fpr[0], fpr[1]])
tpr = np.array([0.0, tpr[0], tpr[1]])
# trivial decisions, add (0,0) and (1,1)
elif fpr.shape[0] == 1:
fpr = np.array([0.0, fpr[0], 1.0])
tpr = np.array([0.0, tpr[0], 1.0])
return fpr, tpr, thresholds[::-1]
def auc(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape'
' to compute area under curve,'
' but x.shape = %s and y.shape = %s.'
% (x.shape, y.shape))
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
# reorder the data points according to the x axis and using y to break ties
x, y = np.array(sorted(points for points in zip(x, y))).T
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
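# Worked sketch of the trapezoidal rule used by auc above: for x = [0, 0.5, 1]
# and y = [0, 1, 1] the two trapezoids have areas 0.5 * (0 + 1) / 2 = 0.25 and
# 0.5 * (1 + 1) / 2 = 0.5, so the total area is 0.75. The points are
# illustrative only.
def _auc_trapezoid_example():
    """Return 0.75, the area under the piecewise-linear curve above."""
    x = np.array([0.0, 0.5, 1.0])
    y = np.array([0.0, 1.0, 1.0])
    return auc(x, y)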
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='weighted'):
"""Compute the precision
The precision is the ratio :math:`tp / (tp + fp)` where tp is the
number of true positives and fp the number of false positives. The
precision is intuitively the ability of the classifier not to
label as positive a sample that is negative.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : array, shape = [n_samples]
True targets
y_pred : array, shape = [n_samples]
Predicted targets
labels : array
Integer array of labels
pos_label : int
In the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Set to None in the case of multiclass classification.
average : string, [None, 'micro', 'macro', 'weighted'(default)]
In the multiclass classification case, this determines the
type of averaging performed on the data.
macro:
Average over classes (does not take imbalance into account).
micro:
Average over instances (takes imbalance into account).
This implies that ``precision == recall == f1``
weighted:
Average weighted by support (takes imbalance into account).
Can result in f1 score that is not between precision and recall.
Returns
-------
precision : float
Precision of the positive class in binary classification or
weighted average of the precision of each class for the
multiclass task
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average)
return p
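# Small binary sketch for precision_score: with y_true = [0, 0, 1, 1] and
# y_pred = [0, 1, 1, 1] the positive class (label 1) has tp = 2 and fp = 1,
# so the precision is 2 / 3. The vectors are illustrative only.
def _precision_score_example():
    """Return 2. / 3 for the toy vectors above."""
    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([0, 1, 1, 1])
    return precision_score(y_true, y_pred)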
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='weighted'):
"""Compute the recall
The recall is the ratio :math:`tp / (tp + fn)` where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : array, shape = [n_samples]
True targets
y_pred : array, shape = [n_samples]
Predicted targets
labels : array
Integer array of labels
pos_label : int
In the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Set to None in the case of multiclass classification.
average : string, [None, 'micro', 'macro', 'weighted'(default)]
In the multiclass classification case, this determines the
type of averaging performed on the data.
macro:
Average over classes (does not take imbalance into account).
micro:
Average over instances (takes imbalance into account).
This implies that ``precision == recall == f1``
weighted:
Average weighted by support (takes imbalance into account).
Can result in f1 score that is not between precision and recall.
Returns
-------
recall : float
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average)
return r
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='weighted'):
"""Compute fbeta score
The F_beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The beta parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors precision (``beta == 0`` considers only precision, ``beta == inf``
only recall).
Parameters
----------
y_true : array, shape = [n_samples]
True targets
y_pred : array, shape = [n_samples]
Predicted targets
beta: float
Weight of precision in harmonic mean.
labels : array
Integer array of labels
pos_label : int
In the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Set to None in the case of multiclass classification.
average : string, [None, 'micro', 'macro', 'weighted'(default)]
In the multiclass classification case, this determines the
type of averaging performed on the data.
macro:
Average over classes (does not take imbalance into account).
micro:
Average over instances (takes imbalance into account).
This implies that ``precision == recall == f1``
weighted:
Average weighted by support (takes imbalance into account).
Can result in f1 score that is not between precision and recall.
Returns
-------
fbeta_score : float
fbeta_score of the positive class in binary classification or weighted
average of the fbeta_score of each class for the multiclass task.
References
----------
R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval.
Addison Wesley, pp. 327-328.
http://en.wikipedia.org/wiki/F1_score
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average)
return f
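# Worked arithmetic for the F_beta formula used above, with precision p = 0.5
# and recall r = 1.0 (values chosen only for illustration):
#   beta = 0.5: (1 + 0.25) * 0.5 * 1.0 / (0.25 * 0.5 + 1.0) = 0.625 / 1.125 ~ 0.556
#   beta = 2.0: (1 + 4.0)  * 0.5 * 1.0 / (4.0 * 0.5 + 1.0)  = 2.5 / 3.0   ~ 0.833
# A smaller beta keeps the score closer to the precision, a larger beta closer
# to the recall.
def _fbeta_by_hand(p, r, beta):
    """Return the F_beta value for a given precision p and recall r."""
    beta2 = beta ** 2
    return (1 + beta2) * p * r / (beta2 * p + r)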
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='weighted'):
"""Compute f1 score
The F1 score can be interpreted as a weighted average of the precision
and recall, where an F1 score reaches its best value at 1 and worst
score at 0. The relative contributions of precision and recall to the F1
score are equal. The formula for the F_1 score is::
F_1 = 2 * (precision * recall) / (precision + recall)
See: http://en.wikipedia.org/wiki/F1_score
In the multi-class case, this is the weighted average of the f1-score of
each class.
Parameters
----------
y_true : array, shape = [n_samples]
True targets
y_pred : array, shape = [n_samples]
Predicted targets
labels : array
Integer array of labels
pos_label : int
In the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Set to None in the case of multiclass classification.
average : string, [None, 'micro', 'macro', 'weighted'(default)]
In the multiclass classification case, this determines the
type of averaging performed on the data.
macro:
Average over classes (does not take imbalance into account).
micro:
Average over instances (takes imbalance into account).
This implies that ``precision == recall == f1``
weighted:
Average weighted by support (takes imbalance into account).
Can result in f1 score that is not between precision and recall.
Returns
-------
f1_score : float
f1_score of the positive class in binary classification or weighted
average of the f1_scores of each class for the multiclass task
References
----------
http://en.wikipedia.org/wiki/F1_score
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average)
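# Sketch of f1_score on the same toy vectors as the precision example above:
# precision = 2 / 3 and recall = 1.0, so
#   F_1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = (4/3) / (5/3) = 0.8
def _f1_score_example():
    """Return 0.8 for the toy binary vectors below."""
    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([0, 1, 1, 1])
    return f1_score(y_true, y_pred)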
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None):
"""Compute precisions, recalls, f-measures and support for each class
The precision is the ratio :math:`tp / (tp + fp)` where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio :math:`tp / (tp + fn)` where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F_beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F_beta score reaches its best
value at 1 and worst score at 0.
The F_beta score weights recall beta times as much as precision. beta = 1.0
means recall and precision are equally important.
The support is the number of occurrences of each class in y_true.
If pos_label is None, this function returns the average precision, recall
and f-measure if `average` is one of 'micro', 'macro', 'weighted'.
Parameters
----------
y_true : array, shape = [n_samples]
True targets
y_pred : array, shape = [n_samples]
Predicted targets
beta : float, 1.0 by default
The strength of recall versus precision in the f-score.
labels : array
Integer array of labels
pos_label : int
In the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Set to None in the case of multiclass classification.
average : string, [None, 'micro', 'macro', 'weighted'(default)]
In the multiclass classification case, this determines the
type of averaging performed on the data.
macro:
Average over classes (does not take imbalance into account).
micro:
Average over instances (takes imbalance into account).
This implies that ``precision == recall == f1``
weighted:
Average weighted by support (takes imbalance into account).
Can result in f1 score that is not between precision and recall.
Returns
-------
precision: array, shape = [n_unique_labels], dtype = np.double
recall: array, shape = [n_unique_labels], dtype = np.double
f1_score: array, shape = [n_unique_labels], dtype = np.double
support: array, shape = [n_unique_labels], dtype = np.long
References
----------
http://en.wikipedia.org/wiki/Precision_and_recall
"""
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_true, y_pred = check_arrays(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
true_pos = np.zeros(n_labels, dtype=np.double)
false_pos = np.zeros(n_labels, dtype=np.double)
false_neg = np.zeros(n_labels, dtype=np.double)
support = np.zeros(n_labels, dtype=np.long)
for i, label_i in enumerate(labels):
true_pos[i] = np.sum(y_pred[y_true == label_i] == label_i)
false_pos[i] = np.sum(y_pred[y_true != label_i] == label_i)
false_neg[i] = np.sum(y_pred[y_true == label_i] != label_i)
support[i] = np.sum(y_true == label_i)
try:
# oddly, we may get an "invalid" rather than a "divide" error here
old_err_settings = np.seterr(divide='ignore', invalid='ignore')
# precision and recall
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
# handle division by 0.0 in precision and recall
precision[(true_pos + false_pos) == 0.0] = 0.0
recall[(true_pos + false_neg) == 0.0] = 0.0
# fbeta score
beta2 = beta ** 2
fscore = (1 + beta2) * (precision * recall) / (
beta2 * precision + recall)
# handle division by 0.0 in fscore
fscore[(precision + recall) == 0.0] = 0.0
finally:
np.seterr(**old_err_settings)
if not average:
return precision, recall, fscore, support
elif n_labels == 2:
if pos_label not in labels:
raise ValueError("pos_label=%d is not a valid label: %r" %
(pos_label, labels))
pos_label_idx = list(labels).index(pos_label)
return (precision[pos_label_idx], recall[pos_label_idx],
fscore[pos_label_idx], support[pos_label_idx])
else:
average_options = (None, 'micro', 'macro', 'weighted')
if average == 'micro':
avg_precision = true_pos.sum() / (true_pos.sum() +
false_pos.sum())
avg_recall = true_pos.sum() / (true_pos.sum() + false_neg.sum())
avg_fscore = (1 + beta2) * (avg_precision * avg_recall) / \
(beta2 * avg_precision + avg_recall)
elif average == 'macro':
avg_precision = np.mean(precision)
avg_recall = np.mean(recall)
avg_fscore = np.mean(fscore)
elif average == 'weighted':
avg_precision = np.average(precision, weights=support)
avg_recall = np.average(recall, weights=support)
avg_fscore = np.average(fscore, weights=support)
else:
raise ValueError('average has to be one of ' +
str(average_options))
return avg_precision, avg_recall, avg_fscore, None
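# Sketch of the averaging modes on an imbalanced 3-class toy problem (labels
# and predictions are illustrative only). With average=None the four per-class
# arrays are returned; 'macro' averages them uniformly, 'weighted' averages
# them using the support, and 'micro' pools the tp/fp/fn counts before
# computing the ratios. pos_label is ignored when there are more than two
# labels.
def _prfs_average_example():
    y_true = np.array([0, 0, 0, 0, 1, 1, 2])
    y_pred = np.array([0, 0, 1, 2, 1, 1, 2])
    per_class = precision_recall_fscore_support(y_true, y_pred, average=None)
    macro = precision_recall_fscore_support(y_true, y_pred, average='macro')
    weighted = precision_recall_fscore_support(y_true, y_pred,
                                               average='weighted')
    micro = precision_recall_fscore_support(y_true, y_pred, average='micro')
    return per_class, macro, weighted, micro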
def matthews_corrcoef(y_true, y_pred):
"""Returns matthew's correlation coefficient for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are
of very different sizes. The MCC is in essence a correlation coefficient
value between -1 and +1. A coefficient of +1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse prediction.
The statistic is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
estimated targets
Returns
-------
mcc : float
Matthews correlation coefficient (+1 represents a perfect prediction,
0 an average random prediction and -1 an inverse prediction).
References
----------
http://en.wikipedia.org/wiki/Matthews_correlation_coefficient
http://dx.doi.org/10.1093/bioinformatics/16.5.412
"""
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
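# Sketch for matthews_corrcoef: a perfect binary prediction gives +1 and a
# fully inverted one gives -1 (the vectors are illustrative only).
def _matthews_corrcoef_example():
    y_true = np.array([0, 1, 0, 1, 1, 0])
    perfect = matthews_corrcoef(y_true, y_true)        # -> +1
    inverted = matthews_corrcoef(y_true, 1 - y_true)   # -> -1
    return perfect, inverted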
def classification_report(y_true, y_pred, labels=None, target_names=None):
"""Build a text report showing the main classification metrics
Parameters
----------
y_true : array, shape = [n_samples]
True targets
y_pred : array, shape = [n_samples]
Estimated targets
labels : array, shape = [n_labels]
Optional list of label indices to include in the report
target_names : list of strings
Optional display names matching the labels (same order)
Returns
-------
report : string
Text summary of the precision, recall, f1-score for each class
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%d' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading))
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["%0.2f" % float(v)]
values += ["%d" % int(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["%0.2f" % float(v)]
values += ['%d' % np.sum(s)]
report += fmt % tuple(values)
return report
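# Sketch for classification_report: the report is a plain-text table with one
# row per class plus a weighted-average footer. The labels, predictions and
# display names below are illustrative only.
def _classification_report_example():
    y_true = np.array([0, 0, 0, 1, 1, 2])
    y_pred = np.array([0, 0, 1, 1, 1, 2])
    return classification_report(y_true, y_pred,
                                 target_names=['class 0', 'class 1',
                                               'class 2'])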
def precision_recall_curve(y_true, probas_pred):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio :math:`tp / (tp + fp)` where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio :math:`tp / (tp + fn)` where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}
probas_pred : array, shape = [n_samples]
Estimated probabilities
Returns
-------
precision : array, shape = [n + 1]
Precision values
recall : array, shape = [n + 1]
Recall values
thresholds : array, shape = [n]
Thresholds on y_score used to compute precision and recall
"""
y_true = y_true.ravel()
labels = np.unique(y_true)
if np.all(labels == np.array([-1, 1])):
# convert {-1, 1} to boolean {0, 1} repr
y_true = y_true.copy()
y_true[y_true == -1] = 0
elif not np.all(labels == np.array([0, 1])):
raise ValueError("y_true contains non binary labels: %r" % labels)
probas_pred = probas_pred.ravel()
thresholds = np.sort(np.unique(probas_pred))
n_thresholds = thresholds.size + 1
precision = np.empty(n_thresholds)
recall = np.empty(n_thresholds)
for i, t in enumerate(thresholds):
y_pred = (probas_pred >= t).astype(np.int)
p, r, _, _ = precision_recall_fscore_support(y_true, y_pred)
precision[i] = p[1]
recall[i] = r[1]
precision[-1] = 1.0
recall[-1] = 0.0
return precision, recall, thresholds
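# Sketch for precision_recall_curve on four samples (the scores are
# illustrative only): each value in the sorted unique scores is used as a
# threshold and yields one precision/recall pair, and the final pair
# (precision=1., recall=0.) is appended without a corresponding threshold.
def _precision_recall_curve_example():
    y_true = np.array([0, 0, 1, 1])
    probas_pred = np.array([0.1, 0.4, 0.35, 0.8])
    return precision_recall_curve(y_true, probas_pred)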
def explained_variance_score(y_true, y_pred):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Note: the explained variance is not a symmetric function.
return the explained variance
Parameters
----------
y_true : array-like
y_pred : array-like
"""
y_true, y_pred = check_arrays(y_true, y_pred)
numerator = np.var(y_true - y_pred)
denominator = np.var(y_true)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrarily set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred):
"""R^2 (coefficient of determination) regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
z : float
The R^2 score
Notes
-----
This is not a symmetric function.
References
----------
http://en.wikipedia.org/wiki/Coefficient_of_determination
"""
y_true, y_pred = check_arrays(y_true, y_pred)
numerator = ((y_true - y_pred) ** 2).sum()
denominator = ((y_true - y_true.mean()) ** 2).sum()
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrarily set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
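# Sketch for the two regression scores above (targets are illustrative only):
# predicting the targets exactly gives a score of 1.0, while predicting their
# mean everywhere gives 0.0, since the residual variance then equals the
# target variance.
def _regression_score_example():
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    mean_pred = y_true.mean() * np.ones_like(y_true)
    perfect = r2_score(y_true, y_true)                 # 1.0
    baseline = r2_score(y_true, mean_pred)             # 0.0
    ev = explained_variance_score(y_true, mean_pred)   # 0.0
    return perfect, baseline, ev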
def zero_one_score(y_true, y_pred):
"""Zero-one classification score (accuracy)
Return the fraction of correct predictions in y_pred (a float in [0, 1]).
The best performance is 1.
Parameters
----------
y_true : array-like, shape = n_samples
Gold standard labels.
y_pred : array-like, shape = n_samples
Predicted labels, as returned by a classifier.
Returns
-------
score : float
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.mean(y_pred == y_true)
###############################################################################
# Loss functions
def zero_one(y_true, y_pred):
"""Zero-One classification loss
Positive integer (number of misclassifications). The best performance
is 0.
Return the number of errors
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
loss : float
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.sum(y_pred != y_true)
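# Sketch contrasting zero_one (count of errors) with zero_one_score (fraction
# of correct predictions); the vectors are illustrative only.
def _zero_one_example():
    y_true = np.array([1, 2, 3, 4])
    y_pred = np.array([1, 2, 3, 5])
    n_errors = zero_one(y_true, y_pred)        # 1
    accuracy = zero_one_score(y_true, y_pred)  # 0.75
    return n_errors, accuracy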
def mean_squared_error(y_true, y_pred):
"""Mean squared error regression loss
Return a positive floating point value (the best value is 0.0).
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
loss : float
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.mean((y_pred - y_true) ** 2)
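# Worked sketch for mean_squared_error: with the illustrative vectors below
# the squared errors are 0.25, 0.25 and 0.0, so the mean is 0.5 / 3 ~ 0.167.
def _mean_squared_error_example():
    y_true = np.array([3.0, -0.5, 2.0])
    y_pred = np.array([2.5, 0.0, 2.0])
    return mean_squared_error(y_true, y_pred)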
@deprecated("""Incorrectly returns the cumulated error: use mean_squared_error
instead; to be removed in v0.12""")
def mean_square_error(y_true, y_pred):
"""Cumulated square error regression loss
Positive floating point value: the best value is 0.0.
Return the cumulated (summed) square error.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
loss : float
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.linalg.norm(y_pred - y_true) ** 2
def hinge_loss(y_true, pred_decision, pos_label=1, neg_label=-1):
"""
Cumulated hinge loss (non-regularized).
Assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, margin = y_true * pred_decision
is always negative (since the signs disagree), therefore 1 - margin
is always greater than 1. The cumulated hinge loss therefore
upperbounds the number of mistakes made by the classifier.
Parameters
----------
y_true : array, shape = [n_samples]
True target (integers)
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats)
"""
# TODO: multi-class hinge-loss
if pos_label != 1 or neg_label != -1:
# the rest of the code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
y_true = y_true.copy()
y_true[y_true == pos_label] = 1
y_true[y_true == neg_label] = -1
margin = y_true * pred_decision
losses = 1 - margin
# The hinge doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.mean(losses)
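# Worked sketch for hinge_loss with labels in {-1, +1} (the decision values
# are illustrative only): the margins are [2.0, 3.0, 0.5, -1.0], the
# per-sample losses max(0, 1 - margin) are [0.0, 0.0, 0.5, 2.0], and the mean
# is 0.625. The single mistake (last sample) contributes more than 1 to the
# summed loss, which is why the cumulated hinge loss upper-bounds the number
# of mistakes.
def _hinge_loss_example():
    y_true = np.array([-1, 1, 1, -1])
    pred_decision = np.array([-2.0, 3.0, 0.5, 1.0])
    return hinge_loss(y_true, pred_decision)   # 0.625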
| bsd-3-clause |
nhejazi/scikit-learn | examples/svm/plot_svm_margin.py | 88 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
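# A minimal sketch relating the dashed margin lines to the fitted
# coefficients: the distance from the separating hyperplane to either margin
# line is 1 / ||w||, so a smaller C (stronger regularization) should yield a
# larger value here. Reuses the X, Y generated above; the helper name is
# illustrative.
def margin_width(C):
    """Fit a linear SVC with the given C and return its geometric margin."""
    clf = svm.SVC(kernel='linear', C=C)
    clf.fit(X, Y)
    return 1.0 / np.sqrt(np.sum(clf.coef_ ** 2))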
| bsd-3-clause |
alexis-roche/nipy | examples/algorithms/mixed_effects.py | 4 | 1939 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
This example illustrates the impact of using a mixed-effects model
for the detection of the effects, when the first-level variance is known:
If the first level variance is very variable across observations, then taking
it into account gives more reliable detections, as seen in an ROC curve.
Requires matplotlib.
Author: Bertrand Thirion, 2012
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from nipy.algorithms.statistics.mixed_effects_stat import (
generate_data, one_sample_ttest, t_stat)
# generate the data
N, P = 15, 500
V1 = np.random.randn(N, P) ** 2
effects = 0.5 * (np.random.randn(P) > 0)
Y = generate_data(np.ones(N), effects, .25, V1)
# compute the statistics
T1 = one_sample_ttest(Y, V1, n_iter=5)
T1 = [T1[effects == x] for x in np.unique(effects)]
T2 = [t_stat(Y)[effects == x] for x in np.unique(effects)]
# Derive ROC curves
ROC1 = np.array([np.sum(T1[1] > - x) for x in np.sort(- T1[0])])\
* 1. / T1[1].size
ROC2 = np.array([np.sum(T2[1] > - x) for x in np.sort(- T2[0])])\
* 1. / T1[1].size
# make a figure
FIG = plt.figure(figsize=(10, 5))
AX = FIG.add_subplot(121)
AX.plot(np.linspace(0, 1, len(ROC1)), ROC1, label='mixed effects')
AX.plot(np.linspace(0, 1, len(ROC2)), ROC2, label='t test')
AX.set_xlabel('false positives')
AX.set_ylabel('true positives')
AX.set_title('ROC curves for the detection of effects', fontsize=12)
AX.legend(loc='lower right')
AX = FIG.add_subplot(122)
AX.boxplot(T1, positions=[-0.1, .9])
AX.boxplot(T2, positions=[0.1, 1.1])
AX.set_xticks([0, 1])
AX.set_xlabel('simulated effects')
AX.set_ylabel('decision statistic')
AX.set_title('left: mixed effects model, \n right: standard t test',
fontsize=12)
plt.show()
| bsd-3-clause |
MTG/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics-phase.py | 2 | 1988 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(pX1.size)/float(N), pX1, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, 50, 200])
plt.title('pX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(pX2.size)/float(N), pX2, 'c', lw=1.5)
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,min(pX2), 25])
plt.title('pX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX3.size)/float(N), pX3, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,2, 24])
plt.title('pX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics-phase.png')
plt.show()
| agpl-3.0 |
nelson-liu/scikit-learn | sklearn/ensemble/tests/test_forest.py | 19 | 41737 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
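# The registries above drive the parameterized checks below: each test_*
# generator yields (check_function, estimator_name) pairs, and each check
# looks the estimator class up by name. A minimal sketch of that dispatch,
# scoring a classifier on the iris data loaded above (classifiers only):
def _run_classifier_check(name):
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(n_estimators=5, random_state=0)
    clf.fit(iris.data, iris.target)
    return clf.score(iris.data, iris.target)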
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 1 | 5845 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if isinstance(v, Series):
s = v
elif isinstance(v, Transform) and v.input_valency() == 0:
s = v()
# TODO(jamieas): hook up these special cases again
# TODO(soergel): can these special cases be generalized?
# elif isinstance(v, pd.Series):
# s = series.NumpySeries(v.values)
# elif isinstance(v, np.ndarray):
# s = series.NumpySeries(v)
else:
raise TypeError(
"Column in assignment must be an inflow.Series, pandas.Series or a"
" numpy array; got type '%s'." % type(v).__name__)
self._columns[k] = s
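  # Illustrative sketch only (the column names and series below are made up,
  # not part of this module): with zero-input Transforms or Series in hand,
  #   df = DataFrame()
  #   df.assign(price=price_series, volume=volume_series)
  #   df.columns()   # -> frozenset({'price', 'volume'})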
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def build(self):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache) for name, c in self._columns.items()}
return tensors
def to_input_fn(self, feature_keys=None, target_keys=None):
"""Build an input_fn suitable for use with Estimator.
Args:
feature_keys: the names of columns to be used as features. If None, all
columns except those in target_keys are used.
target_keys: the names of columns to be used as targets. None is
acceptable for unsupervised learning.
Returns:
A function that returns a pair of dicts (features, targets), each mapping
string names to Tensors.
Raises:
ValueError: when the feature and target key sets are non-disjoint
"""
if target_keys is None:
target_keys = []
if feature_keys is None:
feature_keys = self.columns() - set(target_keys)
else:
in_both = set(feature_keys) & set(target_keys)
if in_both:
raise ValueError(
"Columns cannot be used for both features and targets: %s" %
", ".join(in_both))
def input_fn():
# It's important to build all the tensors together in one DataFrame.
# If we did df.select() for both key sets and then build those, the two
# resulting DataFrames would be shuffled independently.
tensors = self.build()
# Note that (for now at least) we provide our columns to Estimator keyed
# by strings, so they are base features as far as Estimator is concerned.
# TODO(soergel): reconcile with FeatureColumn keys, Transformer etc.
features = {key: tensors[key] for key in feature_keys}
targets = {key: tensors[key] for key in target_keys}
return features, targets
return input_fn
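  # Illustrative sketch only (column names are made up): with a DataFrame `df`
  # holding 'price' and 'label' columns,
  #   input_fn = df.to_input_fn(feature_keys=['price'], target_keys=['label'])
  #   features, targets = input_fn()   # dicts mapping column name -> Tensor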
| apache-2.0 |
pnedunuri/scipy | tools/refguide_check.py | 29 | 23595 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
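    # Sample lines, shown here only for illustration of what the patterns match:
    #   "   trapz      -- Integrate along the given axis."  (pattern 1, autosummary-style entry)
    #   ".. function:: trapz"                                (pattern 2, explicit directive)
    # and "``golden``" would match the extra scipy.constants pattern added below.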
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
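    # Promote warnings to errors, then call the object with a keyword argument
    # no function accepts: a deprecated callable emits DeprecationWarning as
    # soon as it is called, before the bogus argument can raise TypeError.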
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
skip_types = (dict, str, unicode, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
def check_doctests(module, verbose, dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
# the namespace to run examples in
ns = {'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
# if MPL is available, use display-less backend
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim'}
def __init__(self, parse_namedtuples=True, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(ns))
a_got = eval(got, dict(ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except TypeError:
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogenous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
# Loop over non-deprecated items
results = []
all_success = True
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
all_success = False
if have_matplotlib:
plt.close('all')
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
return results
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=list(PUBLIC_SUBMODULES),
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
args = parser.parse_args(argv)
modules = []
names_dict = {}
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
rbharath/deepchem | deepchem/dock/tests/test_pose_scoring.py | 2 | 1939 | """
Tests for Pose Scoring
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import sys
import unittest
import tempfile
import os
import shutil
import numpy as np
import deepchem as dc
from sklearn.ensemble import RandomForestRegressor
from subprocess import call
class TestPoseScoring(unittest.TestCase):
"""
Does sanity checks on pose generation.
"""
def setUp(self):
"""Downloads dataset."""
call(
"wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz".
split())
call("tar -zxvf core_grid.tar.gz".split())
self.core_dataset = dc.data.DiskDataset("core_grid/")
def tearDown(self):
"""Removes dataset"""
call("rm -rf core_grid/".split())
def test_pose_scorer_init(self):
"""Tests that pose-score works."""
if sys.version_info >= (3, 0):
return
sklearn_model = RandomForestRegressor(n_estimators=10)
model = dc.models.SklearnModel(sklearn_model)
print("About to fit model on core set")
model.fit(self.core_dataset)
pose_scorer = dc.dock.GridPoseScorer(model, feat="grid")
def test_pose_scorer_score(self):
"""Tests that scores are generated"""
if sys.version_info >= (3, 0):
return
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
sklearn_model = RandomForestRegressor(n_estimators=10)
model = dc.models.SklearnModel(sklearn_model)
print("About to fit model on core set")
model.fit(self.core_dataset)
pose_scorer = dc.dock.GridPoseScorer(model, feat="grid")
score = pose_scorer.score(protein_file, ligand_file)
assert score.shape == (1,)
| mit |
amadeuspzs/travelTime | utils.py | 1 | 1252 | import pandas as pd
# Return start of the week
def find_week_start(data):
return find_day_start(data,1) # mondays
# Return start and end of week
def find_weeks(data):
start = find_week_start(data)
weeks = []
for i in range(len(start)):
if i < len(start)-1:
weeks.append([start[i],start[i+1]-1])
else:
weeks.append([start[i],len(data)-1])
return weeks
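# Usage sketch (illustrative; assumes `data` is a pandas DataFrame with a
# `Timestamp` column of datetime-like values, as these helpers expect):
#   weeks = find_weeks(data)                  # [[start_idx, end_idx], ...] per week
#   monday_starts = find_day_start(data, 1)   # 1 = Monday in strftime("%w")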
def find_day_start(data,day):
days = []
seek_day = True
next_day = day + 1 if day < 6 else 0
for index, row in data.iterrows():
if seek_day is True and int(row.Timestamp.strftime("%w")) == day:
days.append(index)
seek_day = False
elif seek_day is False and int(row.Timestamp.strftime("%w")) == next_day:
seek_day = True
return days
def find_days(data,day):
days = []
next_day = day + 1 if day < 6 else 0
day_starts = find_day_start(data,day)
    # exclude next-day starts that occur before the first matching day by
    # starting the next-day search at the first matching day's index
next_day_starts = find_day_start(data[day_starts[0]:],next_day)
for i in range(len(day_starts)):
days.append([day_starts[i], next_day_starts[i]-1 ])
    return days
 | mit |
Archman/felapps | felapps/utils/funutils.py | 1 | 63024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------#
"""
Author: Tong Zhang
Created Time: 11:09, Jan. 29, 2015
utilities/functions for convenience
"""
#-------------------------------------------------------------------------#
from __future__ import division
from __future__ import print_function
import wx
import wx.lib.mixins.listctrl as listmix
import numpy as np
import os
import matplotlib.colors as colors
from matplotlib.text import Text as mText
from matplotlib.lines import Line2D as mLine
import time
import sys
import h5py
import shutil
import random
import string
from . import EnhancedStatusBar as ESB
from . import uiutils
import lmfit
def rescaleImage(image0, scaledFac):
"""
Jan. 28, 2105
rescale image, given the image full path on disk
"""
#originalImage = wx.Image(imagePath, type = wx.BITMAP_TYPE_ANY)
originalImage = image0
imgW, imgH = originalImage.GetSize()
scaledImage = originalImage.Scale(imgW * scaledFac, imgH * scaledFac)
return scaledImage
def findObj(objroot, objclass):
"""
Find all the objects that is instance of objclass belongs to objroot.
obj
"""
objfound = []
for obj in dir(objroot):
obji = eval('objroot.' + obj)
if isinstance(obji, objclass):
objfound.append(obji)
return objfound
class MySpinCtrl(wx.SpinCtrl):
"""
font: wx.Font()
"""
def __init__(self,
parent,
font=None,
fontsize=10,
fontcolor='black',
fontweight=wx.FONTWEIGHT_NORMAL,
*args,
**kws):
wx.SpinCtrl.__init__(self, parent=parent, *args, **kws)
if font == None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.font = font
self.fontcolor = fontcolor
self.fontsize = fontsize
self.setFont(self.font)
self.setFontSize(self.fontsize)
self.setFontColor(self.fontcolor)
def setFontSize(self, fontsize):
self.fontsize = fontsize
self.font.SetPointSize(fontsize)
self.SetFont(self.font)
def setFontColor(self, fontcolor):
self.fontcolor = fontcolor
self.SetForegroundColour(self.fontcolor)
def setFontFaceName(self, facename):
self.facename = facename
self.font.SetFaceName(facename)
self.SetFont(self.font)
def setFont(self, font):
self.font = font
self.SetFont(font)
class MyStaticText(wx.StaticText):
def __init__(self,
parent,
font=None,
fontsize=10,
fontcolor='black',
fontweight=wx.FONTWEIGHT_NORMAL,
*args,
**kws):
wx.StaticText.__init__(self, parent=parent, *args, **kws)
if font == None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.font = font
self.fontcolor = fontcolor
self.fontsize = fontsize
self.setFont(self.font)
self.setFontSize(self.fontsize)
self.setFontColor(self.fontcolor)
def setFontSize(self, fontsize):
self.fontsize = fontsize
self.font.SetPointSize(fontsize)
self.SetFont(self.font)
def setFontColor(self, fontcolor):
self.fontcolor = fontcolor
self.SetForegroundColour(self.fontcolor)
def setFontFaceName(self, facename):
self.facename = facename
self.font.SetFaceName(facename)
self.SetFont(self.font)
def setFont(self, font):
self.font = font
self.SetFont(font)
class MyTextCtrl(wx.TextCtrl):
def __init__(self,
parent,
font=None,
fontsize=10,
fontcolor='black',
*args,
**kws):
wx.TextCtrl.__init__(self, parent=parent, *args, **kws)
if font == None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.font = font
self.fontcolor = fontcolor
self.fontsize = fontsize
self.setFont(self.font)
self.setFontSize(self.fontsize)
self.setFontColor(self.fontcolor)
def setFontSize(self, fontsize):
self.fontsize = fontsize
self.font.SetPointSize(fontsize)
self.SetFont(self.font)
def setFontColor(self, fontcolor):
self.fontcolor = fontcolor
self.SetDefaultStyle(wx.TextAttr(colText=self.fontcolor))
def setFontFaceName(self, facename):
self.facename = facename
self.font.SetFaceName(facename)
self.SetFont(self.font)
def setFont(self, font):
self.font = font
self.SetFont(font)
class MyCheckBox(wx.CheckBox):
def __init__(self,
parent,
font=None,
fontsize=10,
fontcolor='black',
*args,
**kws):
wx.CheckBox.__init__(self, parent=parent, *args, **kws)
if font == None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.font = font
self.fontcolor = fontcolor
self.fontsize = fontsize
self.setFont(self.font)
self.setFontSize(self.fontsize)
self.setFontColor(self.fontcolor)
def setFontSize(self, fontsize):
self.fontsize = fontsize
self.font.SetPointSize(fontsize)
self.SetFont(self.font)
def setFontColor(self, fontcolor):
self.fontcolor = fontcolor
#self.SetDefaultStyle(wx.TextAttr(colText = self.fontcolor))
def setFontFaceName(self, facename):
self.facename = facename
self.font.SetFaceName(facename)
self.SetFont(self.font)
def setFont(self, font):
self.font = font
self.SetFont(font)
class MyButton(wx.Button):
def __init__(self,
parent,
font=None,
fontsize=10,
fontcolor='black',
*args,
**kws):
wx.Button.__init__(self, parent=parent, *args, **kws)
if font == None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.font = font
self.fontcolor = fontcolor
self.fontsize = fontsize
self.setFont(self.font)
self.setFontSize(self.fontsize)
self.setFontColor(self.fontcolor)
def setFontSize(self, fontsize):
self.fontsize = fontsize
self.font.SetPointSize(fontsize)
self.SetFont(self.font)
def setFontColor(self, fontcolor):
self.fontcolor = fontcolor
self.SetForegroundColour(fontcolor)
def setFontFaceName(self, facename):
self.facename = facename
self.font.SetFaceName(facename)
self.SetFont(self.font)
def setFont(self, font):
self.font = font
self.SetFont(font)
class MyComboBox(wx.ComboBox):
def __init__(self,
parent,
font=None,
fontsize=12,
fontcolor='black',
fontweight=wx.FONTWEIGHT_NORMAL,
*args,
**kws):
wx.ComboBox.__init__(self, parent=parent, *args, **kws)
if font == None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.font = font
self.fontcolor = fontcolor
self.fontsize = fontsize
self.setFont(self.font)
self.setFontSize(self.fontsize)
self.setFontColor(self.fontcolor)
def setFontSize(self, fontsize):
self.fontsize = fontsize
self.font.SetPointSize(fontsize)
self.SetFont(self.font)
def setFontColor(self, fontcolor):
self.fontcolor = fontcolor
self.SetForegroundColour(self.fontcolor)
def setFontFaceName(self, facename):
self.facename = facename
self.font.SetFaceName(facename)
self.SetFont(self.font)
def setFont(self, font):
self.font = font
self.SetFont(font)
class MyListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
def __init__(self,
parent,
ID,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
def createwxStaticText(parent,
label,
size=wx.DefaultSize,
style=wx.ALIGN_LEFT,
fontname=wx.SYS_DEFAULT_GUI_FONT,
fontsize=10,
fontweight=wx.FONTWEIGHT_NORMAL,
fontcolor='black'):
font = wx.SystemSettings.GetFont(fontname)
font.SetPointSize(fontsize)
font.SetWeight(fontweight)
st = wx.StaticText(parent=parent, label=label, style=style)
st.SetFont(font)
st.SetForegroundColour(fontcolor)
return st
def createwxTextCtrl(parent,
value='',
style=wx.TE_LEFT,
fontname=wx.SYS_DEFAULT_GUI_FONT,
fontsize=10,
fontweight=wx.FONTWEIGHT_NORMAL,
fontcolor='black'):
font = wx.SystemSettings.GetFont(fontname)
font.SetPointSize(fontsize)
font.SetWeight(fontweight)
textctrl = wx.TextCtrl(parent=parent, value=value, style=style)
textctrl.SetFont(font)
textctrl.SetDefaultStyle(wx.TextAttr(font=font, colText=fontcolor))
return textctrl
def createwxButton(parent,
label,
fontname=wx.SYS_DEFAULT_GUI_FONT,
fontsize=10,
fontweight=wx.FONTWEIGHT_NORMAL,
fontcolor='black',
size=wx.DefaultSize):
font = wx.SystemSettings.GetFont(fontname)
font.SetPointSize(fontsize)
font.SetWeight(fontweight)
btn = wx.Button(parent=parent, label=label, size=size)
btn.SetFont(font)
btn.SetForegroundColour(fontcolor)
return btn
def createwxPanel(parent, backgroundcolor=None, id=wx.ID_ANY):
if backgroundcolor == None:
backgroundcolor = wx.SystemSettings.GetColour(
wx.SYS_COLOUR_BTNFACE).Get()
panel = wx.Panel(parent, id=id)
panel.SetBackgroundColour(backgroundcolor)
return panel
def createwxStaticBox(parent,
label='',
style=wx.ALIGN_LEFT,
fontname=wx.SYS_DEFAULT_GUI_FONT,
fontsize=10,
fontweight=wx.FONTWEIGHT_NORMAL,
fontcolor='black'):
font = wx.SystemSettings.GetFont(fontname)
font.SetPointSize(fontsize)
font.SetWeight(fontweight)
sbox = wx.StaticBox(parent, id=wx.ID_ANY, label=label, style=style)
sbox.SetFont(font)
sbox.SetForegroundColour(fontcolor)
return sbox
def aupmu(gap, xlamd, a=3.44, b=-5.00, c=1.54):
"""
gap, xlamd: [mm]
"""
bfield = a * np.exp(b * (gap / xlamd) + c * (gap / xlamd)**2)
au = 0.934 * (xlamd / 10) * bfield / np.sqrt(2)
return au
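# Worked example (illustrative numbers only): for xlamd = 30 mm and gap = 10 mm,
# gap/xlamd ~ 0.33, so bfield ~ 3.44*exp(-5.0*0.33 + 1.54*0.33**2) ~ 0.78 T and
# au ~ 0.934*(30/10)*0.78/sqrt(2) ~ 1.5 (the rms undulator parameter).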
def r56chi(
gam0,
ibfield,
imagl=0.150,
idril=0.285, ):
"""
return r56 of chicane, ibfield: [T]
"""
c0 = 299792458.0
m0 = 9.10938188e-31
e0 = 1.60218e-19
r56 = (2.0 / 3.0 * imagl + idril) * 2 * (
np.arcsin(imagl * e0 * ibfield / np.sqrt(gam0**2 - 1) / m0 / c0))**2
return r56
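# Worked example (illustrative numbers only): for gam0 = 500 and ibfield = 0.3 T
# with the default magnet/drift lengths, the bend angle is
# arcsin(0.15*e0*0.3/(sqrt(500**2 - 1)*m0*c0)) ~ 52.8 mrad, giving
# r56 ~ (2/3*0.15 + 0.285)*2*0.0528**2 ~ 2.1e-3 m.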
def readfld(filename, ncar=121):
fid = open(filename, 'rb')
data = np.fromfile(fid, 'double').reshape(ncar * ncar, 2)
realp = data[:, 0]
imagp = data[:, 1]
intp = sum(realp**2 + imagp**2)
efield = np.array([complex(realp[i], imagp[i])
for i in range(realp.size)]).reshape(ncar, ncar)
wexy = np.fft.fftshift(np.fft.fft2(efield))
farfield = abs(wexy)**2
return intp, farfield
def getResPath(filename, cwd='.', resdir='../resources'):
"""
return absolute path for resources, e.g. images, data
:param reshead : the relative path for resources dir
:param filename: filename for images or data
"""
resRoot = os.path.join(os.path.abspath(cwd), resdir)
resPath = os.path.join(resRoot, filename)
return resPath
def importCheck(moduleName):
"""
    check whether the module named by moduleName can be imported or not
"""
try:
#return map(__import__, moduleName)
return __import__(moduleName)
except ImportError:
dial = wx.MessageDialog(
None,
message=u"Cannot import module named" + u" '" + moduleName[0] +
u"', please try again!",
caption=u"Module import error",
style=wx.OK | wx.CANCEL | wx.ICON_ERROR | wx.CENTRE)
if dial.ShowModal() == wx.ID_OK:
dial.Destroy()
def hex2rgb(hex_string):
"""
convert hexadecimal color into rgb form.
:param hex_string: hex color string, e.g. white color: '#FFFFFF'
Example:
>>> hex2rgb('#FFAABB')
(255, 170, 187)
"""
rgb = colors.hex2color(hex_string)
    return tuple([int(round(255 * x)) for x in rgb])
def rgb2hex(rgb_tuple):
"""
convert color rgb into hex form.
:param rgb_tuple: tuple of rgb color, e.g. white color: (255, 255, 255)
Example:
>>> rgb2hex((255, 170, 187))
u'ffaabb'
"""
return colors.rgb2hex([1.0 * x / 255 for x in rgb_tuple])
def setPath(pathstr):
return os.path.expanduser(
os.path.sep.join(
pathstr.replace('\\', ' ').replace('/', ' ').split(' ')))
def getFileToLoad(parent, ext='*', flag='single'):
if isinstance(ext, list):
if len(ext) > 1:
exts = [x.upper() + ' files (*.' + x + ')|*.' + x for x in ext]
wildcardpattern = '|'.join(exts)
else:
x = ext[0]
wildcardpattern = x.upper() + ' files ' + '(*.' + x + ')|*.' + x
else:
wildcardpattern = ext.upper() + ' files ' + '(*.' + ext + ')|*.' + ext
if flag == 'single':
dial = wx.FileDialog(
parent,
message="Please select file",
defaultDir=".",
defaultFile="",
wildcard=wildcardpattern,
style=wx.FD_DEFAULT_STYLE | wx.FD_FILE_MUST_EXIST)
if dial.ShowModal() == wx.ID_OK:
fullfilename = os.path.join(dial.GetDirectory(),
dial.GetFilename())
return fullfilename
else:
return None
else: #flag = 'multi':
dial = wx.FileDialog(
parent,
message="Please select file",
defaultDir=".",
defaultFile="",
wildcard=wildcardpattern,
style=wx.FD_DEFAULT_STYLE | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE)
if dial.ShowModal() == wx.ID_OK:
fullfilenames = [
os.path.join(dial.GetDirectory(), filename)
for filename in dial.GetFilenames()
]
return fullfilenames
else:
return None
dial.Destroy()
def getFileToSave(parent, ext='*'):
if isinstance(ext, list):
if len(ext) > 1:
exts = [x.upper() + ' files (*.' + x + ')|*.' + x for x in ext]
wildcardpattern = '|'.join(exts)
else:
x = ext[0]
wildcardpattern = x.upper() + ' files ' + '(*.' + x + ')|*.' + x
else:
wildcardpattern = ext.upper() + ' files ' + '(*.' + ext + ')|*.' + ext
dial = wx.FileDialog(
parent,
"Save it as",
wildcard=wildcardpattern,
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dial.ShowModal() == wx.ID_OK:
savetofilename = dial.GetPath()
return savetofilename
else:
return None
dial.Destroy()
class SaveData(object):
def __init__(self, data, fname, type, app='imageviewer'):
"""
type: asc, hdf5, sdds
"""
self.data = data
self.fname = fname
self.app = app
self.onDataProcess()
if type == ".asc":
self.onSaveASC()
elif type == '.hdf5':
self.onSaveHDF5()
elif type == '.sdds':
self.onSaveSDDS()
def onDataProcess(self):
xx, yy = np.sum(self.data, 0), np.sum(self.data, 1)
idx, idy = np.where(xx == xx.max()), np.where(yy == yy.max())
self.xpos, self.ypos = idx[0][0], idy[0][0]
self.maxint = self.data.max()
self.sumint = self.data.sum()
def onSaveASC(self):
np.savetxt(self.fname, self.data, fmt='%.16e', delimiter=' ')
def onSaveHDF5(self):
f = h5py.File(self.fname, 'w')
rg = f.create_group('image')
rg.attrs['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S %Z',
time.localtime())
rg.attrs['app'] = self.app
dset = f.create_dataset(
'image/data', shape=self.data.shape, dtype=self.data.dtype)
dset[...] = self.data
dset.attrs['xypos'] = (self.xpos, self.ypos)
dset.attrs['sumint'] = self.sumint
dset.attrs['maxint'] = self.maxint
f.close()
def onSaveSDDS(self):
        raise NotImplementedError('save sdds format to be implemented.')
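# Reading back a file written by SaveData.onSaveHDF5 (sketch; 'shot.hdf5' is a
# placeholder file name):
#   with h5py.File('shot.hdf5', 'r') as f:
#       img = f['image/data'][...]
#       xpos, ypos = f['image/data'].attrs['xypos']
#       sumint = f['image/data'].attrs['sumint']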
class ExportData(object):
def __init__(self, data_raw, data_fit, model_x, model_y, fname):
self.data_raw = data_raw
self.data_fit = data_fit
self.model_x = model_x
self.model_y = model_y
self.fname = fname
self.onProcess()
self.onSave()
def onProcess(self):
# fit:
res_x = self.model_x.get_fit_result()
        res_y = self.model_y.get_fit_result()
self.x0 = res_x.params['x0'].value
self.y0 = res_y.params['x0'].value
self.sx = res_x.params['xstd'].value
self.sy = res_y.params['xstd'].value
def onSave(self):
f = h5py.File(self.fname, 'w')
rg = f.create_group('data')
rg.attrs['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S %Z',
time.localtime())
for k, v in self.data_raw.items():
dset = f.create_dataset(
'data/raw/' + k,
shape=v.shape,
dtype=v.dtype,
compression=None)
dset[...] = v
dg = f.create_group('data/fit')
dg.attrs['x0'] = self.x0
dg.attrs['y0'] = self.y0
dg.attrs['xstd'] = self.sx
dg.attrs['ystd'] = self.sy
for k, v in self.data_fit.items():
dset = f.create_dataset(
'data/fit/' + k,
shape=v.shape,
dtype=v.dtype,
compression=None)
dset[...] = v
f.close()
class FloatSlider(wx.Slider):
def __init__(self,
parent,
id=wx.ID_ANY,
value=0,
minValue=0,
maxValue=10,
increment=0.1,
size=wx.DefaultSize,
style=wx.SL_HORIZONTAL,
*args,
**kws):
self._value = value
self._min = minValue
self._max = maxValue
self._inc = increment
ival, imin, imax = [
round(v / increment) for v in (value, minValue, maxValue)
]
self._islider = super(FloatSlider, self)
self._islider.__init__(
parent=parent,
value=ival,
minValue=imin,
maxValue=imax,
id=id,
size=size,
style=style,
*args,
**kws)
self.Bind(wx.EVT_SCROLL, self._OnScroll, self._islider)
def _OnScroll(self, event):
ival = self._islider.GetValue()
imin = self._islider.GetMin()
imax = self._islider.GetMax()
if ival == imin:
self._value = self._min
elif ival == imax:
self._value = self._max
else:
self._value = ival * self._inc
event.Skip()
def GetValue(self):
return self._value
def GetMin(self):
return self._min
def GetMax(self):
return self._max
def GetInc(self):
return self._inc
def GetRange(self):
return self._min, self._max
def SetValue(self, value):
self._islider.SetValue(round(value / self._inc))
self._value = value
def SetMin(self, minval):
self._islider.SetMin(round(minval / self._inc))
self._min = minval
def SetMax(self, maxval):
self._islider.SetMax(round(maxval / self._inc))
self._max = maxval
def SetInc(self, inc):
self._islider.SetRange(round(self._min / inc), round(self._max / inc))
self._islider.SetValue(round(self._value / inc))
self._inc = inc
def SetRange(self, minval, maxval):
self._islider.SetRange(
round(minval / self._inc), round(maxval / self._inc))
self._min = minval
self._max = maxval
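# Note: FloatSlider maps its float range onto the underlying integer wx.Slider
# by dividing every value by `increment`; GetValue() converts the integer
# position back, so the effective resolution is one increment step.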
def func_sinc(x, y):
r = np.sqrt(x**2 + y**2)
return np.sin(r) / r
def func_peaks(x, y):
return 3 * (1 - x)**2 * np.exp(-(x**2) - (y + 1)**2) - 10 * (
x / 5 - x**3 - y**5
) * np.exp(-x**2 - y**2) - 1 / 3 * np.exp(-(x + 1)**2 - y**2)
class ImageDataFactor(object): # will write into C module, 2015-06-16
def __init__(self, z):
self.imgdata = z
def setData(self, z):
self.imgdata = z
def getData(self):
        return self.imgdata
def getInt(self):
return np.sum(self.imgdata)
class ScanDataFactor(object): # will write into C module, 2015-06-17
def __init__(self, z, scannum, shotnum, ndim=2):
self.scanshape = [scannum, shotnum, ndim]
self.scandata = z.reshape(self.scanshape)
self.scanmean = self.scandata.mean(axis=1)
def show(self):
print([
self.scandata[i, :, :][:, 1].std()
for i in range(0, self.scanshape[0])
])
def setData(self, z):
self.scandata = z
def getXerrbar(self):
self.xerr = [
self.scandata[i, :, :][:, 0].std()
for i in range(0, self.scanshape[0])
]
return np.array(self.xerr)
def getYerrbar(self):
self.yerr = [
self.scandata[i, :, :][:, 1].std()
for i in range(0, self.scanshape[0])
]
return np.array(self.yerr)
def getXavg(self):
return self.scanmean[:, 0]
def getYavg(self):
return self.scanmean[:, 1]
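# ScanDataFactor expects the flat scan record reshaped as
# [scan point, shot, (x, y)]: getXavg/getYavg return per-point means over the
# shots, and getXerrbar/getYerrbar the corresponding shot-to-shot standard
# deviations.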
class ProgressBarFrame(wx.Frame):
def __init__(self, parent, title, range=100, *args, **kws):
wx.Frame.__init__(self, parent=parent, title=title, *args, **kws)
self.range = range
self.createProgressbar()
self.SetMinSize((400, 10))
self.Centre()
self.Show()
self.t0 = time.time()
self.elapsed_time_timer.Start(1000)
def createProgressbar(self):
self.pb = wx.Gauge(self)
self.pb.SetRange(range=self.range)
self.elapsed_time_st = createwxStaticText(
self, 'Elapsed Time:', fontsize=10)
self.elapsed_time_val = createwxStaticText(
self, '00:00:00', fontsize=10, fontcolor='red')
vbox_main = wx.BoxSizer(wx.VERTICAL)
hbox_time = wx.BoxSizer(wx.HORIZONTAL)
hbox_time.Add(self.elapsed_time_st, 0,
wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, 5)
hbox_time.Add(self.elapsed_time_val, 0,
wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, 5)
vbox_main.Add(self.pb, 0, wx.EXPAND | wx.ALL, 5)
vbox_main.Add(hbox_time, 0, wx.EXPAND | wx.ALL, 5)
self.SetSizerAndFit(vbox_main)
self.elapsed_time_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.onTickTimer, self.elapsed_time_timer)
def onTickTimer(self, event):
fmt = '%H:%M:%S'
self.elapsed_time_val.SetLabel(
time.strftime(fmt, time.gmtime(time.time() - self.t0)))
def handleConfig(config_name='imageviewer.xml'):
"""
    handle configuration file issues:
1 load configuration files from app data
2 create user specific configuration files
:param config_name: configuration file name, by default is 'imageviewer.xml' (for app Image Viewer)
    return a valid configuration file at the default user location
"""
default_location = os.path.expanduser("~/.felapps/config/")
if not os.path.exists(default_location):
os.system('mkdir -p ' + default_location)
default_configfile = os.path.join(default_location, config_name)
optional_configfile = os.path.join(sys.prefix, 'local/share/felapps/',
config_name)
if os.path.isfile(default_configfile
): # configuration file is found at default location
retval = default_configfile
elif os.path.isfile(
optional_configfile
): # load from system location, copy to default user location
shutil.copy2(optional_configfile, default_configfile)
retval = default_configfile
else: # pop window to let user select config file and copy to default user location
config_selected = getFileToLoad(None, ext='xml')
shutil.copy2(config_selected, default_configfile)
retval = default_configfile
return retval
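# Typical call (sketch): handleConfig('imageviewer.xml') returns the per-user
# copy under ~/.felapps/config/, creating that copy on first use.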
class FitModels(object):
def __init__(self, model='gaussian', params=None, **kws):
"""
fitting models: 'gaussuan' : a * exp(-(x-x0)^2/2/xstd^2) + y0
'polynomial': \Sigma_i=0^n x^i * a_i
'power' : a * x ^ b
'sin' : a * sin(b * x + c) + d
"""
if params is None:
params = lmfit.Parameters()
self._model = model
self._params = params
try:
self._x, self._y = kws['x'], kws['y']
except:
self._x, self._y = [], []
try:
self.n = kws['n']
except:
self.n = 1 # when model is polynomial, highest order
self.n += 1 # range(n + 1): [0, n]
# data fitting window
self.x_fit_min, self.x_fit_max = kws.get('xmin'), kws.get('xmax')
# fitting method
self._method = 'leastsq'
self._set_params_func = {
'gaussian': self._set_params_gaussian,
'polynomial': self._set_params_polynomial,
}
self._fitfunc = {
'gaussian': self._fit_gaussian,
'polynomial': self._fit_polynomial,
}
self._gen_func_text = {
'gaussian': self._gen_func_text_gaussian,
'polynomial': self._gen_func_text_polynomial,
}
self._fit_result = None
@property
def model(self):
return self._model
@model.setter
    def model(self, model):
self._model = model
@property
def method(self):
return self._method
@method.setter
def method(self, method):
self._method = method
def _fit_gaussian(self, p, x):
a = p['a'].value
x0 = p['x0'].value
y0 = p['y0'].value
xstd = p['xstd'].value
return a * np.exp(-(x - x0)**2.0 / 2.0 / xstd / xstd) + y0
def _fit_polynomial(self, p, x):
f = 0
for i in range(self.n):
f += p['a' + str(i)].value * x**i
return f
def _errfunc(self, p, f, x, y):
return f(p, x) - y
def set_data(self, data=None, x=None, y=None):
""" set raw data to fit
"""
if data is not None:
self._x, self._y = data[:, 0], data[:, 1]
else:
if x is not None: self._x = x
if y is not None: self._y = y
def get_data(self):
""" return raw data
"""
return self._x, self._y
def _set_fitfunc(self, type=None):
""" type: gaussian, linear, quadratic, polynomial, power, sin
"""
if type is not None:
self._model = type
def _gen_func_text_gaussian(self, p0):
a = p0['a'].value
x0 = p0['x0'].value
y0 = p0['y0'].value
xstd = p0['xstd'].value
retfun = '$f(x) = a e^{-\\frac{(x-x_0)^2}{2\sigma_x^2}}+y_0$'
retcoe = '$a = %.3f, x_0 = %.3f, \sigma_x = %.3f, y_0 = %.3f$' % (a,
x0,
xstd,
y0)
return {'func': retfun, 'fcoef': retcoe}
def _gen_func_text_polynomial(self, p0):
retfun = '$f(x) = \sum_{i=0}^{%s}\,a_i x^i$' % (self.n)
retcoe = ','.join([
'$a_{%d} = %.3f$' % (i, p0['a' + str(i)].value)
for i in range(self.n)
])
return {'func': retfun, 'fcoef': retcoe}
def set_params(self, **p0):
"""p0: initial parameters dict"""
self._set_params_func[self._model](p0)
def _set_params_gaussian(self, p0):
self._params.add('a', value=p0['a'])
self._params.add('x0', value=p0['x0'])
self._params.add('y0', value=p0['y0'])
self._params.add('xstd', value=p0['xstd'])
def _set_params_polynomial(self, p0):
for i in range(self.n):
pi_name = 'a' + str(i)
self._params.add(pi_name, value=p0[pi_name])
def get_fitfunc(self, p0=None):
if p0 is None:
p0 = self._fit_result.params
f_func = self._fitfunc[self._model]
gen_func = self._gen_func_text[self._model]
f_text = gen_func(p0)
return f_func, f_text
def get_fit_result(self):
return self._fit_result
def fit(self):
p = self._params
f = self._fitfunc[self._model]
x, y = self._x, self._y
xmin = self.x_fit_min if self.x_fit_min is not None else x.min()
xmax = self.x_fit_max if self.x_fit_max is not None else x.max()
x_fit, idx = get_range(x, xmin, xmax)
y_fit = y[idx]
m = self._method
res = lmfit.minimize(
self._errfunc, p, method=m, args=(f, x_fit, y_fit))
self._fit_result = res
return res
def fit_report(self):
# gaussian model
if self._model == 'gaussian':
if self._fit_result is not None:
p = self._fit_result.params
retstr1 = "Fitting Function:" + "\n"
retstr2 = "a*exp(-(x-x0)^2/2/sx^2)+y0" + "\n"
retstr3 = "Fitting Output:" + "\n"
retstr4 = "{a0_k:<3s}: {a0_v:>10.4f}\n".format(
a0_k='a', a0_v=p['a'].value)
retstr5 = "{x0_k:<3s}: {x0_v:>10.4f}\n".format(
x0_k='x0', x0_v=p['x0'].value)
retstr6 = "{sx_k:<3s}: {sx_v:>10.4f}\n".format(
sx_k='sx', sx_v=p['xstd'].value)
retstr7 = "{y0_k:<3s}: {y0_v:>10.4f}".format(
y0_k='y0', y0_v=p['y0'].value)
return retstr1 + retstr2 + retstr3 + retstr4 + retstr5 + retstr6 + retstr7
else:
return "Nothing to report."
elif self._model == 'polynomial':
if self._fit_result is not None:
p = self._fit_result.params
retstr = "Fitting Function:" + "\n"
fstr = '+'.join(
['a' + str(i) + '*x^' + str(i) for i in range(self.n)])
fstr = fstr.replace('*x^0', '')
fstr = fstr.replace('x^1', 'x')
retstr += fstr + '\n'
retstr += "Fitting Output:" + "\n"
for i in range(self.n):
ki = 'a' + str(i)
retstr += "{k:<3s}: {v:>10.4f}\n".format(
k=ki, v=p[ki].value)
return retstr
else:
return "Nothing to report."
def calc_p0(self):
""" return p0 from input x, y
"""
if self._model == 'gaussian':
x, xdata = self._x, self._y
x0 = np.sum(x * xdata) / np.sum(xdata)
p0 = {
'a': xdata.max(),
'x0': x0,
'xstd': (np.sum((x - x0)**2 * xdata) / np.sum(xdata))**0.5,
'y0': 0,
}
elif self._model == 'polynomial':
p0 = {'a' + str(i): 1 for i in range(self.n)}
return p0
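# The sketch below is an added illustration (not part of the original module):
# a minimal end-to-end use of FitModels on synthetic data, assuming numpy and
# lmfit are available as imported at the top of this module. The sample values
# are made up.
def _example_fit_models_usage():
    """Hedged usage sketch: seed a Gaussian fit from calc_p0() and report it."""
    x = np.linspace(-5.0, 5.0, 200)
    y = 3.0 * np.exp(-(x - 1.0) ** 2 / (2.0 * 0.5 ** 2)) + 0.2  # a=3, x0=1, xstd=0.5, y0=0.2
    fm = FitModels(model='gaussian', x=x, y=y)
    fm.set_params(**fm.calc_p0())  # initial guesses estimated from the data
    res = fm.fit()                 # lmfit minimize result
    print(fm.fit_report())
    return res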
def get_randstr(length=1):
""" return string of random picked up chars
:param length: string length to return
"""
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
retval = ''.join([random.choice(chars) for _ in range(length)])
return retval
def get_file_info(filepath):
""" return file information
:param filepath: file full path name
"""
f_info = os.stat(filepath)
f_ctime = time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(f_info.st_ctime))
f_size_bytes = f_info.st_size
f_name = os.path.basename(filepath)
return {'name': f_name, 'ctime': f_ctime, 'bytes': f_size_bytes}
def gaussian_fit(x, xdata, mode='full'):
""" return fit result and fitmodels
:param x: data to fit, x col, numpy array
:param xdata: data to fit, y col, numpy array
"""
fm = FitModels()
x0 = np.sum(x * xdata) / np.sum(xdata)
p0 = {
'a': xdata.max(),
'x0': x0,
'xstd': (np.sum((x - x0)**2 * xdata) / np.sum(xdata))**0.5,
'y0': 0
}
fm.set_data(x=x, y=xdata)
fm.set_params(**p0)
res = fm.fit()
if mode == 'full':
return res, fm
elif mode == 'simple':
return [res.params[k].value for k in ('x0', 'xstd')]
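# Added note (illustration only, values are made up): the 'simple' mode of
# gaussian_fit returns just the fitted centre and width of a profile, e.g.
#   x = np.arange(100.0)
#   prof = 40.0 * np.exp(-(x - 60.0) ** 2 / (2.0 * 4.0 ** 2))
#   x0, xstd = gaussian_fit(x, prof, mode='simple')   # roughly 60 and 4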
class AnalysisPlotPanel(uiutils.MyPlotPanel):
def __init__(self, parent, data=None, **kwargs):
""" data: m x n image array
"""
if data is None:
x = y = np.linspace(-np.pi, np.pi, 100)
xx, yy = np.meshgrid(x, y)
data = func_sinc(xx, yy)
#data = np.zeros([50, 50])
self.data = data
self.cmap = 'jet'
# axis directions
self.xaxis_direction = True # left->right: small->big
self.yaxis_direction = True # bottom->up : small->big
self.line_color = wx.Colour(255, 165,
0).GetAsString(wx.C2S_HTML_SYNTAX)
self.mec = wx.Colour(255, 0, 0).GetAsString(wx.C2S_HTML_SYNTAX)
self.mfc = wx.Colour(255, 0, 0).GetAsString(wx.C2S_HTML_SYNTAX)
# pos markers M1 and M2
self.mkc1 = wx.Colour(255, 0, 0).GetAsString(wx.C2S_HTML_SYNTAX)
self.mkc2 = wx.Colour(240, 230, 140).GetAsString(wx.C2S_HTML_SYNTAX)
self.pcc = wx.Colour(0, 0, 0).GetAsString(wx.C2S_HTML_SYNTAX)
self.mk1, self.mk2 = False, False
uiutils.MyPlotPanel.__init__(self, parent, **kwargs)
# specific relationship between self and the parent? frame
self.mframe_point = self.parent.GetParent().GetParent()
def set_color(self, rgb_tuple):
""" set figure and canvas with the same color.
:param rgb_tuple: rgb color tuple,
e.g. (255, 255, 255) for white color
"""
if rgb_tuple is None:
rgb_tuple = wx.SystemSettings.GetColour(
wx.SYS_COLOUR_WINDOWFRAME).Get()
clr = [c / 255.0 for c in rgb_tuple]
self.figure.set_facecolor(clr)
self.figure.set_edgecolor(clr)
def _init_plot(self):
pass
def on_press(self, event):
if event.inaxes:
x0, y0 = event.xdata, event.ydata
self.draw_hvlines(x0, y0)
def on_release(self, event):
pass
#x0, y0 = event.xdata, event.ydata
#self.x0, self.y0 = x0, y0
def set_markflags(self, mk1=False, mk2=False):
self.mk1, self.mk2 = mk1, mk2
try:
if self.mk1 == True:
self._draw_hvlines1(self.x_pos1, self.y_pos1)
elif self.mk2 == True:
self._draw_hvlines2(self.x_pos2, self.y_pos2)
except:
return
def set_mkc1(self, color):
self.mkc1 = color
def set_mkc2(self, color):
self.mkc2 = color
def set_pcc(self, color):
self.pcc = color
if hasattr(self, 'pc1'):
self.pc1.set_mec(color)
self.pc1.set_mfc(color)
if hasattr(self, 'pc2'):
self.pc2.set_mec(color)
self.pc2.set_mfc(color)
if hasattr(self, 'plbl1'):
self.plbl1.set_color(color)
if hasattr(self, 'plbl2'):
self.plbl2.set_color(color)
self.refresh()
def draw_hvlines(self, x0, y0):
if self.mk1 == True:
self._draw_hvlines1(x0, y0)
elif self.mk2 == True:
self._draw_hvlines2(x0, y0)
try:
self.update_deltxy()
except:
pass
def _draw_hvlines1(self, x0, y0):
if hasattr(self, 'hl1'):
self.hl1.set_ydata([y0, y0])
else:
self.hl1 = self.axes.axhline(y0, ls='--')
self.hl1.set_color(self.mkc1)
if hasattr(self, 'vl1'):
self.vl1.set_xdata([x0, x0])
else:
self.vl1 = self.axes.axvline(x0, ls='--')
self.vl1.set_color(self.mkc1)
if hasattr(self, 'pc1'):
self.pc1.set_data(x0, y0)
else:
self.pc1, = self.axes.plot(x0, y0, 'ko', ms=6, mfc='k', mec='k')
self.pc1.set_mec(self.pcc)
self.pc1.set_mfc(self.pcc)
if hasattr(self, 'plbl1'):
self.plbl1.set_position((x0, y0))
else:
self.plbl1 = self.axes.text(x0, y0, r'$\mathsf{M1}$', fontsize=16)
self.plbl1.set_color(self.pcc)
self.x_pos1, self.y_pos1 = x0, y0
try:
self.mframe_point.m1_pos_st.SetLabel(
'{0:.1f},{1:.1f}'.format(x0, y0))
except:
pass
self.refresh()
def _draw_hvlines2(self, x0, y0):
if hasattr(self, 'hl2'):
self.hl2.set_ydata([y0, y0])
else:
self.hl2 = self.axes.axhline(y0, color='r', ls='--')
self.hl2.set_color(self.mkc2)
if hasattr(self, 'vl2'):
self.vl2.set_xdata([x0, x0])
else:
self.vl2 = self.axes.axvline(x0, color='r', ls='--')
self.vl2.set_color(self.mkc2)
if hasattr(self, 'pc2'):
self.pc2.set_data(x0, y0)
else:
self.pc2, = self.axes.plot(x0, y0, 'ko', ms=6, mfc='k', mec='k')
self.pc2.set_mec(self.pcc)
self.pc2.set_mfc(self.pcc)
if hasattr(self, 'plbl2'):
self.plbl2.set_position((x0, y0))
else:
self.plbl2 = self.axes.text(x0, y0, r'$\mathsf{M2}$', fontsize=16)
self.plbl2.set_color(self.pcc)
self.x_pos2, self.y_pos2 = x0, y0
try:
self.mframe_point.m2_pos_st.SetLabel(
'{0:.1f},{1:.1f}'.format(x0, y0))
except:
pass
self.refresh()
def update_deltxy(self):
m1_pos_val = self.mframe_point.m1_pos_st.GetLabel()
m2_pos_val = self.mframe_point.m2_pos_st.GetLabel()
if m1_pos_val != '' and m2_pos_val != '':
x1, y1 = [float(i) for i in m1_pos_val.split(',')]
x2, y2 = [float(i) for i in m2_pos_val.split(',')]
dx = abs(x1 - x2)
dy = abs(y1 - y2)
self.mframe_point.delx_val_st.SetLabel("{0:.1f}".format(dx))
self.mframe_point.dely_val_st.SetLabel("{0:.1f}".format(dy))
def set_colormap(self, cmap):
self.cmap = cmap
self.image.set_cmap(cmap)
self.refresh()
def set_linecolor(self, color):
self.line_color = color
[line.set_color(color) for line in self.line_list]
self.refresh()
def set_fontsize(self, fontsize):
x_lbl = self.axes.get_xlabel()
y_lbl = self.axes.get_ylabel()
self.axes.set_xlabel(x_lbl, fontsize=fontsize + 4)
self.axes.set_ylabel(y_lbl, fontsize=fontsize + 4)
self.axes.tick_params(labelsize=fontsize)
self.refresh()
def set_line_id(self, line='raw'):
""" selected current editable line,
'raw': raw data
'fitted': fitted lines
'none': hide all lines
'show': show all lines
"""
if line == 'none':
self.linex.set_visible(False)
self.liney.set_visible(False)
self.linex_fit.set_visible(False)
self.liney_fit.set_visible(False)
self.line_list = []
elif line == 'show':
self.linex.set_visible(True)
self.liney.set_visible(True)
self.linex_fit.set_visible(True)
self.liney_fit.set_visible(True)
self.line_list = [
self.linex, self.liney, self.linex_fit, self.liney_fit
]
elif line == 'raw':
self.linex.set_visible(True)
self.liney.set_visible(True)
self.linex_fit.set_visible(False)
self.liney_fit.set_visible(False)
self.line_list = [self.linex, self.liney]
elif line == 'fit':
self.linex.set_visible(False)
self.liney.set_visible(False)
self.linex_fit.set_visible(True)
self.liney_fit.set_visible(True)
self.line_list = [self.linex_fit, self.liney_fit]
self.refresh()
def set_lines(self):
if self.data is None:
return
data = self.data
hx, hy = np.sum(data, 0), np.sum(data, 1)
idxmaxx, idxmaxy = np.where(hx == hx.max()), np.where(hy == hy.max())
maxidx, maxidy = idxmaxx[0][0], idxmaxy[0][0]
x, y = np.arange(hx.size), np.arange(hy.size)
hx = hx / hx.max() * maxidy
hy = hy / hy.max() * maxidx
res_x, fm_x = gaussian_fit(x, hx)
res_y, fm_y = gaussian_fit(y, hy)
self.linex, = self.axes.plot(x, hx)
self.liney, = self.axes.plot(hy, y)
self.linex.set_color(self.line_color)
self.liney.set_color(self.line_color)
self.linex.set_marker('')
self.linex.set_markersize(5)
self.linex.set_mec(self.mec)
self.linex.set_mfc(self.mfc)
self.liney.set_marker('')
self.liney.set_markersize(5)
self.liney.set_mec(self.mec)
self.liney.set_mfc(self.mfc)
# fitted lines
x_fit = np.linspace(x.min(), x.max(), 200)
y_fit = np.linspace(y.min(), y.max(), 200)
fx, tx = fm_x.get_fitfunc(res_x.params)
fy, ty = fm_y.get_fitfunc(res_y.params)
self.linex_fit, = self.axes.plot(x_fit, fx(res_x.params, x_fit))
self.liney_fit, = self.axes.plot(fy(res_y.params, y_fit), y_fit)
self.linex_fit.set_color(self.line_color)
self.liney_fit.set_color(self.line_color)
self.linex_fit.set_marker('')
self.linex_fit.set_markersize(5)
self.linex_fit.set_mec(self.mec)
self.linex_fit.set_mfc(self.mfc)
self.liney_fit.set_marker('')
self.liney_fit.set_markersize(5)
self.liney_fit.set_mec(self.mec)
self.liney_fit.set_mfc(self.mfc)
self.axes.set_xlim([x.min(), x.max()])
self.axes.set_ylim([y.min(), y.max()])
# hide all lines
self.linex.set_visible(False)
self.liney.set_visible(False)
self.linex_fit.set_visible(False)
self.liney_fit.set_visible(False)
self.refresh()
self.res_x, self.res_y = res_x, res_y
self.line_list = []
def get_fit_report(self, xoy='x'):
""" return fitting report if success,
else return None
"""
if xoy == 'x':
p = self.res_x.params
else:
p = self.res_y.params
retstr2 = "f(x) = a*exp(-(x-x0)^2/2/sx^2)+y0" + "\n"
retstr4 = " {a0_k:<3s}: {a0_v:>10.4f}\n".format(
a0_k='a', a0_v=p['a'].value)
retstr5 = " {x0_k:<3s}: {x0_v:>10.4f}\n".format(
x0_k='x0', x0_v=p['x0'].value)
retstr6 = " {sx_k:<3s}: {sx_v:>10.4f}\n".format(
sx_k='sx', sx_v=p['xstd'].value)
retstr7 = " {y0_k:<3s}: {y0_v:>10.4f}".format(
y0_k='y0', y0_v=p['y0'].value)
retval = retstr2 + retstr4 + retstr5 + retstr6 + retstr7
if 'nan' in retval:
return None
else:
return retval
def set_figure_data(self, data, fit=True):
self.data = data
if not hasattr(self, 'axes'):
self.axes = self.figure.add_subplot(111, aspect=1.0)
self.z = data
self.image = self.axes.imshow(self.z, cmap=self.cmap)
if fit:
self.set_lines()
else:
dimx, dimy = self.z.shape
x, y = np.arange(dimy), np.arange(dimx)
self.image.set_extent([x.min(), x.max(), y.min(), y.max()])
self.refresh()
def set_linestyle(self, ls):
[line.set_linestyle(ls) for line in self.line_list]
self.refresh()
def set_marker(self, mk):
[line.set_marker(mk) for line in self.line_list]
self.refresh()
def set_markersize(self, ms):
[line.set_markersize(ms) for line in self.line_list]
self.refresh()
def set_mec(self, c):
self.mec = c
[line.set_mec(c) for line in self.line_list]
self.refresh()
def set_mfc(self, c):
self.mfc = c
[line.set_mfc(c) for line in self.line_list]
self.refresh()
def set_linewidth(self, lw):
[line.set_linewidth(lw) for line in self.line_list]
self.refresh()
def set_ticks(self, flag='half'):
# ticks position
if flag == 'half':
s = ['on', 'on', 'off', 'off']
self.axes.tick_params(labelbottom=s[0])
self.axes.tick_params(labelleft=s[1])
self.axes.tick_params(labeltop=s[2])
self.axes.tick_params(labelright=s[3])
elif flag == 'all':
s = ['on', 'on', 'on', 'on']
self.axes.tick_params(labelbottom=s[0])
self.axes.tick_params(labelleft=s[1])
self.axes.tick_params(labeltop=s[2])
self.axes.tick_params(labelright=s[3])
self.refresh()
def set_mticks(self, flag='off'):
if flag == 'on':
self.axes.minorticks_on()
self.refresh()
elif flag == 'off':
self.axes.minorticks_off()
self.refresh()
def set_grids(self, color, b=None, which='major'):
if b is None:
self.axes.grid(which=which, color=color, linestyle='--')
else:
self.axes.grid(b=False)
self.refresh()
def set_origin(self, ll=False, ul=False, ur=False, lr=False):
if ll:
self.direct_xyaxis(True, True)
self.axes.xaxis.set_ticks_position('bottom')
self.axes.yaxis.set_ticks_position('left')
self.axes.xaxis.set_label_position('bottom')
self.axes.yaxis.set_label_position('left')
if ul:
self.direct_xyaxis(True, False)
self.axes.xaxis.set_ticks_position('top')
self.axes.yaxis.set_ticks_position('left')
self.axes.xaxis.set_label_position('top')
self.axes.yaxis.set_label_position('left')
if ur:
self.direct_xyaxis(False, False)
self.axes.xaxis.set_ticks_position('top')
self.axes.yaxis.set_ticks_position('right')
self.axes.xaxis.set_label_position('top')
self.axes.yaxis.set_label_position('right')
if lr:
self.direct_xyaxis(False, True)
self.axes.xaxis.set_ticks_position('bottom')
self.axes.yaxis.set_ticks_position('right')
self.axes.xaxis.set_label_position('bottom')
self.axes.yaxis.set_label_position('right')
self.refresh()
def get_clim(self):
clim = self.image.get_clim()
return "{cmin:.1f} : {cmax:.1f}".format(cmin=clim[0], cmax=clim[1])
def set_clim(self, cr):
clim = sorted(float(i) for i in cr.split(':'))
self.image.set_clim(clim)
self.refresh()
def direct_xyaxis(self, x_direction, y_direction):
if self.xaxis_direction != x_direction:
self.axes.invert_xaxis()
self.xaxis_direction = x_direction
if self.yaxis_direction != y_direction:
self.axes.invert_yaxis()
self.yaxis_direction = y_direction
def hide_image(self, hide_flag):
self.image.set_visible(not hide_flag)
self.refresh()
def clear(self):
if hasattr(self, 'axes'):
self.axes.cla()
def get_data(self):
""" return data: image, raw data, fit data
"""
data = {}
data['raw'] = {}
data['fit'] = {}
data['attr'] = {}
data['image'] = self.image.get_array()
data['raw']['prof_x'] = self.linex.get_data()
data['raw']['prof_y'] = self.liney.get_data()
data['fit']['prof_x'] = self.linex_fit.get_data()
data['fit']['prof_y'] = self.liney_fit.get_data()
data['attr']['x0'] = self.res_x.params['x0'].value
data['attr']['sx'] = self.res_x.params['xstd'].value
data['attr']['y0'] = self.res_y.params['x0'].value
data['attr']['sy'] = self.res_y.params['xstd'].value
return data
class ScanPlotPanel(AnalysisPlotPanel):
def __init__(self, parent, data=None, **kws):
AnalysisPlotPanel.__init__(self, parent, data, **kws)
self.line_mean = None
self.eb_mks = None
self.eb_lines = None
self.line_fit = None
self.pick_obj_text = None
self._init_config()
self._init_plot_new()
def _init_plot(self):
pass
def _init_plot_new(self):
self.x, self.y, self.xerrarr, self.yerrarr = np.nan, np.nan, np.nan, np.nan
if not hasattr(self, 'axes'):
self.axes = self.figure.add_subplot(111)
self.ebplot = self.axes.errorbar(
self.x,
self.y,
xerr=self.xerrarr,
yerr=self.yerrarr,
fmt=self.eb_fmt,
color=self.avg_linecolor,
linewidth=self.avg_lw,
ls=self.avg_ls,
marker=self.avg_marker,
ms=self.avg_ms,
mfc=self.avg_mfc,
mec=self.avg_mec,
elinewidth=self.eb_lw,
ecolor=self.eb_markercolor,
capsize=self.eb_markersize,
capthick=self.eb_mew, )
self.line_mean, self.eb_mks, self.eb_lines = self.ebplot
self._edit_obj = {
'marker': [
self.line_mean,
],
'line': [
self.line_mean,
]
}
self.pick_pt = {}
self.line_mean.set_picker(5)
self.canvas.draw()
def _init_config(self):
self.eb_fmt = ''
self.eb_markercolor = '#1E90FF'
self.eb_markersize = 10
self.eb_mew = 2
self.eb_lw = 1
self.avg_ls = '--'
self.avg_lw = 1
self.avg_linecolor = 'g'
self.avg_mfc = 'r'
self.avg_mec = 'r'
self.avg_ms = 10
self.avg_marker = 'H'
def get_edit_obj(self):
return self._edit_obj
def get_mean_line(self):
return self.line_mean
def get_errorbar_mks(self):
return self.eb_mks
def get_errorbar_line(self):
return self.eb_lines
def get_fit_line(self):
return self.line_fit
def set_fit_model(self, model='gaussian', **kws):
x, y = self.x, self.y
fit_model = FitModels(model=model, **kws)
fit_model.method = 'leastsq'
fit_model.set_data(x=x, y=y)
p0 = fit_model.calc_p0()
fit_model.set_params(**p0)
fit_res = fit_model.fit()
fx, tx = fit_model.get_fitfunc(fit_res.params)
self.fx = fx
self.fit_res = fit_res
self.fit_model = fit_model
def set_fit_line(self, point_num=200, **kws):
""" apply fitting model to average curve
"""
try:
self.line_fit.remove()
except:
pass
x = self.x
x_fit_min, x_fit_max = kws.get('xmin'), kws.get('xmax')
xmin = x_fit_min if x_fit_min is not None else x.min()
xmax = x_fit_max if x_fit_max is not None else x.max()
x_fit = np.linspace(xmin, xmax, point_num)
y_fit = self.fx(self.fit_res.params, x_fit)
self.line_fit, = self.axes.plot(x_fit, y_fit, 'b', ls='solid', lw=2)
self.refresh()
def get_fit_result(self):
return self.fit_res
def get_fit_model(self):
return self.fit_model
def hide_fit_line(self):
self.line_fit.set_visible(False)
self.refresh()
def set_line_id(self, line='Average Curve'):
""" selected current editable line,
'Average Curve': curve of mean value of every iteration
'Errorbars' : errorbars, x and y
'Fitting Curve': fitting curve of average curve
"""
if line == 'Average Curve':
self._edit_obj = {
'marker': [self.line_mean],
'line': [
self.line_mean,
]
}
elif line == 'Errorbars':
self._edit_obj = {'marker': self.eb_mks, 'line': self.eb_lines}
elif line == 'Fitting Curve':
self._edit_obj = {
'marker': [
self.line_fit,
],
'line': [
self.line_fit,
]
}
def repaint(self):
self.adjustErrbar(self.ebplot, self.x, self.y, self.xerrarr,
self.yerrarr)
self.axes.relim()
self.axes.autoscale_view(False, True, True)
self.refresh()
def adjustErrbar(self, err, x, y, x_error, y_error):
ln, (errx_top, errx_bot, erry_top, erry_bot), (barsx, barsy) = err
ln.set_data(x, y)
x_base = x
y_base = y
xerr_top = x_base + x_error
xerr_bot = x_base - x_error
yerr_top = y_base + y_error
yerr_bot = y_base - y_error
errx_top.set_xdata(xerr_top)
errx_bot.set_xdata(xerr_bot)
errx_top.set_ydata(y_base)
errx_bot.set_ydata(y_base)
erry_top.set_xdata(x_base)
erry_bot.set_xdata(x_base)
erry_top.set_ydata(yerr_top)
erry_bot.set_ydata(yerr_bot)
new_segments_x = [
np.array([[xt, y], [xb, y]])
for xt, xb, y in zip(xerr_top, xerr_bot, y_base)
]
new_segments_y = [
np.array([[x, yt], [x, yb]])
for x, yt, yb in zip(x_base, yerr_top, yerr_bot)
]
barsx.set_segments(new_segments_x)
barsy.set_segments(new_segments_y)
def on_motion(self, event):
if event.inaxes:
x0, y0 = event.xdata, event.ydata
self.pos_st.SetLabel('({x:<.4f}, {y:<.4f})'.format(x=x0, y=y0))
#self._draw_hvlines1(x0, y0)
def set_linecolor(self, color):
[line.set_color(color) for line in self._edit_obj['line']]
self.refresh()
def set_linestyle(self, ls):
[line.set_linestyle(ls) for line in self._edit_obj['line']]
self.refresh()
def set_linewidth(self, lw):
[line.set_linewidth(lw) for line in self._edit_obj['line']]
self.refresh()
def set_marker(self, mk):
[line.set_marker(mk) for line in self._edit_obj['marker']]
self.refresh()
def set_mks(self, mks):
[line.set_markersize(mks) for line in self._edit_obj['marker']]
self.refresh()
def set_mew(self, mew):
[line.set_mew(mew) for line in self._edit_obj['marker']]
self.refresh()
def set_mfc(self, color):
[line.set_mfc(color) for line in self._edit_obj['marker']]
self.refresh()
def set_mec(self, color):
l = self._edit_obj['marker']
[line.set_mec(color) for line in self._edit_obj['marker']]
self.refresh()
def set_grid(self):
self.axes.grid()
self.refresh()
def set_legend(self, **kws):
show_val = kws.get('show')
if not show_val:
self.legend_box.set_visible(False)
else:
try:
l_avg = kws.get(
'avg') if kws.get('avg') is not None else 'Average'
l_fit = kws.get(
'fit') if kws.get('fit') is not None else 'Fitting'
self.line_mean.set_label(l_avg)
self.line_fit.set_label(l_fit)
self.legend_box = self.axes.legend()
self.legend_box.set_visible(True)
except:
pass
self.refresh()
def set_title(self, **kws):
show_val = kws.get('show')
if not show_val: # if auto_title is ticked, show title, else not
self.title_box.set_visible(False)
else:
time_now = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime())
title_str = kws.get(
'title') if kws.get('title') is not None else time_now
self.title_box = self.axes.set_title(title_str)
self.title_box.set_visible(True)
self.refresh()
def set_xlabel(self, **kws):
show_val = kws.get('show')
if not show_val:
self.xlabel_box.set_visible(False)
else:
xlabel_str = kws.get(
'xlabel') if kws.get('xlabel') is not None else u'$x$'
self.xlabel_box = self.axes.set_xlabel(xlabel_str)
self.xlabel_box.set_visible(True)
self.refresh()
def set_text(self, text, **kws):
""" set fitting result output as a Text object
:param text: fitting result text from FitModels class
available keys of kws:, x, y, fontsize
"""
if hasattr(self, 'func_text'):
self.func_text.remove()
if kws.get('x') is None or kws.get('y') is None:
self.func_text = self.axes.text(
0.,
0.,
text,
bbox=dict(facecolor='#E6E5E4', alpha=0.5),
transform=self.axes.transData,
fontsize=kws.get('fontsize'))
else:
self.func_text = self.axes.text(
kws['x'],
kws['y'],
text,
bbox=dict(facecolor='#E6E5E4', alpha=0.5),
fontsize=kws.get('fontsize'))
self.func_text.set_picker(True)
self.refresh()
def update_text(self, **kws):
""" available keys of kws:, x, y, fontsize
"""
fs = kws.get('fontsize')
if fs is None:
fs = self.func_text.get_fontsize()
self.func_text.set_fontsize(fs)
x, y = kws.get('x'), kws.get('y')
o_x, o_y = self.func_text.get_position()
if x is not None:
o_x = x
if y is not None:
o_y = y
self.func_text.set_position((o_x, o_y))
self.refresh()
def on_pick(self, event):
if isinstance(event.artist, mText):
self.pick_obj_text = event.artist
self.pick_pos = event.mouseevent.xdata, event.mouseevent.ydata
if isinstance(event.artist, mLine):
self.pick_obj_p0 = event.artist
idx = event.ind
x, y = self.pick_obj_p0.get_data()
x0, y0 = np.take(x, idx), np.take(y, idx)
if self.pick_pt.get(idx[0]) is None:
pt0, = self.axes.plot(x0, y0)
pt0.set_marker(self.pick_obj_p0.get_marker())
pt0.set_ms(self.pick_obj_p0.get_ms())
pt0.set_mew(2.0)
ic = invert_color_hex(color_to_hex(self.pick_obj_p0.get_mfc()))
pt0.set_mec(ic)
pt0.set_mfc(ic)
self.pick_pt[idx[0]] = pt0
else:
self.pick_pt[idx[0]].remove()
self.pick_pt.pop(idx[0])
self.refresh()
def get_pick_pt(self):
""" return picked points dict
{k:v}, k: index, v: drawing artist
"""
return self.pick_pt
def clear_pick_pt(self):
""" clear picked points (which are picked for retaking)
"""
for k, v in self.pick_pt.iteritems():
v.remove()
self.pick_pt.clear()
self.refresh()
def on_press(self, event):
pass
#print event.xdata, event.ydata
def on_release(self, event):
if event.inaxes:
n_x, n_y = event.xdata, event.ydata
if self.pick_obj_text is not None:
o_x, o_y = self.pick_obj_text.get_position()
x = o_x + n_x - self.pick_pos[0]
y = o_y + n_y - self.pick_pos[1]
self.pick_obj_text.set_position((x, y))
self.pick_obj_text = None
self.refresh()
def pick_color():
dlg = wx.ColourDialog(None)
dlg.GetColourData().SetChooseFull(True) # only windows
if dlg.ShowModal() == wx.ID_OK:
color = dlg.GetColourData().GetColour()
dlg.Destroy()
return color
def set_staticbmp_color(obj, color):
"""
obj: staticbitmap, bitmapbutton
color: could be returned by pick_color()
"""
r, g, b = color.Red(), color.Green(), color.Blue()
w, h = 16, 16
bmp = wx.Bitmap(w, h)
img = bmp.ConvertToImage()
img.SetRGB(wx.Rect(0, 0, w, h), r, g, b)
obj.SetBitmap(wx.Bitmap(img))
def get_range(x, xmin, xmax):
""" find array range,
:param x: original numpy array, 1d
:param xmin: x min of range
:param xmax: x max of range
return range_index and array
"""
if xmin >= xmax:
return x, np.arange(x.size)
idx1, idx2 = np.where(x > xmin), np.where(x < xmax)
idx = np.intersect1d(idx1, idx2)
return x[idx], idx
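# Added note (illustration only): get_range keeps the samples strictly inside
# (xmin, xmax) and also returns their indices, e.g.
#   x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
#   x_in, idx = get_range(x, 0.5, 3.5)   # x_in -> [1., 2., 3.], idx -> [1, 2, 3]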
def color_to_hex(c):
""" convert matplotlib colors into hex string format,
e.g.
1 color_to_hex('r') = '#FF0000'
2 color_to_hex('red') = '#FF0000'
3 color_to_hex('#FF0000') = '#FF0000'
"""
if c.startswith('#'):
clr = c
else:
try:
clr = colors.rgb2hex(colors.colorConverter.colors[c])
except:
clr = colors.cnames[c]
return clr
def invert_color_hex(hex_color_str):
""" invert hex colors,
e.g. invert_color_hex('#FFFFFF') = '#000000'
"""
table = string.maketrans('0123456789abcdef', 'fedcba9876543210')
hex_str = str(hex_color_str[1:].lower())
return '#' + hex_str.translate(table).upper()
| mit |
haesemeyer/RegionSelector | utilities.py | 1 | 4881 | import pyqtgraph as pg
import matplotlib.path as mpath
import h5py
import numpy as np
import warnings
class RegionContainer:
"""
Container for saving and loading RegionROI information
"""
def __init__(self, positions, region_name: str, z_index: int):
"""
Create a new RegionContainer
:param positions: The polygon vertices of the ROI
:param region_name: The name of this region
:param z_index: The index of the z-plane that this ROI came from
"""
self.positions = positions
self.region_name = region_name
self.z_index = z_index
def point_in_region(self, point):
"""
Tests whether a point is within the region or outside
:param point: The test point
:return: True if the point is within the region False if on a boundary or outside
"""
# even when setting closed to True we still need to supply the first point twice
poly_path = mpath.Path(self.positions + [self.positions[0]], closed=True)
return poly_path.contains_point(point)
@staticmethod
def save_container_list(container_list, dfile: h5py.File):
"""
Saves a list of RegionContainer objects to an hdf5 file
:param container_list: The list of RegionContainer objects to save
:param dfile: Handle of hdf5 file to which the list should be saved
"""
key = 0
for rc in container_list:
while str(key) in dfile:
key += 1
dfile.create_group(str(key))
dfile[str(key)].create_dataset(name="positions", data=np.vstack(rc.positions))
dfile[str(key)].create_dataset(name="region_name", data=rc.region_name)
dfile[str(key)].create_dataset(name="z_index", data=rc.z_index)
@staticmethod
def load_container_list(dfile: h5py.File):
"""
Loads a list of RegionContainer objects from an hdf5 file
:param dfile: Handle of hdf5 file from which list should be loaded
:return: A list of RegionContainer objects
"""
container_list = []
for k in dfile.keys():
try:
pos = np.array(dfile[k]["positions"])
pos = [(p[0], p[1]) for p in pos]
rn = str(np.array(dfile[k]["region_name"]))
zi = int(np.array(dfile[k]["z_index"]))
rc = RegionContainer(pos, rn, zi)
container_list.append(rc)
except KeyError:
warnings.warn("Found non RegionContainer object in file {0}".format(dfile.filename))
continue
return container_list
# Class RegionContainer
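# The sketch below is an added illustration (not part of the original module):
# one way to round-trip RegionContainer objects through an hdf5 file. The file
# name and vertex values are arbitrary examples.
def _example_region_container_roundtrip(path="regions_example.hdf5"):
    """Hedged usage sketch: save one container, load it back, test a point."""
    square = RegionContainer([(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)],
                             "example_region", 0)
    with h5py.File(path, "w") as dfile:
        RegionContainer.save_container_list([square], dfile)
    with h5py.File(path, "r") as dfile:
        loaded = RegionContainer.load_container_list(dfile)
    assert loaded[0].point_in_region((5.0, 5.0))       # well inside the square
    assert not loaded[0].point_in_region((20.0, 5.0))  # outside
    return loaded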
class RegionROI(pg.PolyLineROI):
"""
Extension of pyqtgraph's PolyLineROI
"""
def __init__(self, positions, tag_id, region_name: str, z_index: int, **args):
"""
Create new region ROI
:param positions: The vertex positions of the new ROI
:param tag_id: A unique ROI id
:param region_name: The name associated with this region
:param z_index: The index of the z-plane that contains this ROI
:param args: Other arguments passed to PolyLineROI
"""
self.tag_id = tag_id
self.z_index = z_index
if region_name == "":
region_name = str(tag_id)
self.region_name = region_name
super().__init__(positions, closed=True, **args)
def get_vertex_list(self):
"""
Obtain the handles/vertices of this ROI
:return: list of (x, y) tuples with the image-vertex coordinates
"""
plist = self.getLocalHandlePositions()
transform = self.getGlobalTransform({'pos': pg.Point(0, 0), 'size': pg.Point(1, 1), 'angle': 0})
assert transform.getAngle() == 0.0
assert transform.getScale()[0] == 1
assert transform.getScale()[1] == 1
tx = transform.getTranslation()[0]
ty = transform.getTranslation()[1]
return [(p[1].x() + tx, p[1].y() + ty) for p in plist]
def get_container(self):
"""
Copies the non-ui related ROI information to a simple container object for saving
:return: A RegionContainer object with all important information
"""
return RegionContainer(self.get_vertex_list(), self.region_name, self.z_index)
@staticmethod
def from_container(container: RegionContainer, tag_id, **args):
"""
Creates a new RegionROI from a container object
:param container: The container with the ROI information
:param tag_id: A unique ROI id
:param args: Other arguments passed to PolyLineROI
:return: A new RegionROI object corresponding to the container
"""
return RegionROI(container.positions, tag_id, container.region_name, container.z_index, **args)
# Class RegionROI
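# Added note (illustration only): RegionROI is a Qt graphics item, so it is
# normally created inside a running pyqtgraph/Qt application, for example when
# restoring saved regions (file name and tag ids below are arbitrary):
#   containers = RegionContainer.load_container_list(h5py.File("regions.hdf5", "r"))
#   rois = [RegionROI.from_container(c, tag_id=i) for i, c in enumerate(containers)]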
| mit |
redreamality/tushare | setup.py | 21 | 2592 | from setuptools import setup, find_packages
import codecs
import os
import tushare
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
long_desc = """
TuShare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
:target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
:target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
Upgrade
---------------
pip install tushare --upgrade
Quick Start
--------------
::
import tushare as ts
ts.get_hist_data('600848')
return::
open high close low volume p_change ma5 \
date
2012-01-11 6.880 7.380 7.060 6.880 14129.96 2.62 7.060
2012-01-12 7.050 7.100 6.980 6.900 7895.19 -1.13 7.020
2012-01-13 6.950 7.000 6.700 6.690 6611.87 -4.01 6.913
2012-01-16 6.680 6.750 6.510 6.480 2941.63 -2.84 6.813
2012-01-17 6.660 6.880 6.860 6.460 8642.57 5.38 6.822
2012-01-18 7.000 7.300 6.890 6.880 13075.40 0.44 6.788
2012-01-19 6.690 6.950 6.890 6.680 6117.32 0.00 6.770
2012-01-20 6.870 7.080 7.010 6.870 6813.09 1.74 6.832
"""
setup(
name='tushare',
version=tushare.__version__,
description='A utility for crawling historical and Real-time Quotes data of China stocks',
# long_description=read("READM.rst"),
long_description = long_desc,
author='Jimmy Liu',
author_email='[email protected]',
license='BSD',
url='http://tushare.org',
keywords='china stock data',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'],
packages=['tushare','tushare.stock','tushare.data','tushare.util'],
package_data={'': ['*.csv']},
) | bsd-3-clause |
MJuddBooth/pandas | pandas/core/computation/engines.py | 4 | 3796 | """
Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas.compat import map
from pandas import compat
from pandas.core.computation.align import _align, _reconstruct_object
from pandas.core.computation.ops import (
UndefinedVariableError, _mathops, _reductions)
import pandas.io.formats.printing as printing
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
expr : Expr
The expression whose variable names are checked for overlap with numexpr builtins.
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "{expr}" '
'overlap with builtins: ({s})'
.format(expr=expr, s=s))
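# Added note (illustrative assumption about typical usage): a DataFrame column
# named after a numexpr builtin trips this check, e.g.
#   df = pd.DataFrame({'sin': [1, 2]})
#   df.eval('sin + 1', engine='numexpr')   # raises NumExprClobberingError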
class AbstractEngine(object):
"""Object serving as a base class for all engines."""
__metaclass__ = abc.ABCMeta
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super(NumExprEngine, self).__init__(expr)
def convert(self):
return str(super(NumExprEngine, self).convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = compat.text_type(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super(PythonEngine, self).__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
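# Added note (hedged usage sketch): these engines are normally selected through
# the public eval API rather than instantiated directly, e.g.
#   df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
#   df.eval('a + b', engine='numexpr')   # dispatches to NumExprEngine
#   df.eval('a + b', engine='python')    # dispatches to PythonEngine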
| bsd-3-clause |
weissercn/learningml | learningml/GoF/analysis/S-4mu/plot_S-4mu_p1D_alphaSvalue_analysis.py | 1 | 9748 | from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
# Options for mode 'lower_level'
MODE = 'S-4mu_WigD'
label_size = 28
################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf")
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
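# Added worked example (illustrative): for an observed fraction of 0.5 out of
# 100 pseudo-experiments, binomial_error([0.5]) gives sqrt(0.5*0.5/100) = 0.05;
# fractions of exactly 0.0 or 1.0 fall back to the regularised 100/101 estimate.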
################################################################################
# S - 4 mu WigD
################################################################################
if MODE == 'S-4mu_WigD':
#param_list = [0.1,0.08,0.06,0.04,0.02,0.0]
param_list = [0.1,0.08,0.06,0.04]
param_list = [0.0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1]
#param_list = [0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09]
ml_classifiers = ['nn','bdt']
ml_classifiers_colors = ['green','magenta','cyan']
ml_classifiers_bin = 5
chi2_color = 'red'
chi2_splits = [1,2,3,4,5,6,7,8,9,10]
#chi2_splits = [8]
ml_folder_name = "S-4mu_WigD/evaluation_S-VV-4mu_WigD_updated3"
chi2_folder_name = "S-4mu_WigD"
#chi2_folder_name = "event_shapes_lower_level_without_Mult"
ml_file_name = "{1}_S-VV-4mu_WigD_updated3_{0}_syst_0_01__chi2scoring_5_p_values"
chi2_file_name = "S-4mu_WigD_updated3_{0}D_chi2_{1}_splits_p_values"
#chi2_file_name = "event_shapes_lower_level_syst_0_01_attempt4_without_Mult__{0}D_chi2_{1}_splits_p_values"
chi2_1D_file_name = "S-4mu_WigD_updated3_1D_{0}D_chi2_{1}_splits_p_values"
title = "S-4mu"
name = "S-4mu"
CL = 0.95
ml_classifiers_dict={}
chi2_splits_dict={}
chi2_1D_splits_dict={}
#xwidth = [0.5]*len(param_list)
xwidth = np.subtract(param_list[1:],param_list[:-1])/2.
xwidth_left = np.append(xwidth[0] , xwidth)
xwidth_right = np.append(xwidth,xwidth[-1])
print("xwidth : ", xwidth)
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
if True:
for ml_classifier_index, ml_classifier in enumerate(ml_classifiers):
ml_classifiers_dict[ml_classifier]= []
for param in param_list:
p_values = np.loadtxt(os.environ['learningml']+"/GoF/optimisation_and_evaluation/"+ml_folder_name+"/"+ml_classifier+"/"+ml_file_name.format(param,ml_classifier,ml_classifiers_bin)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
ml_classifiers_dict[ml_classifier].append(p_values_in_CL)
ml_classifiers_dict[ml_classifier]= np.divide(ml_classifiers_dict[ml_classifier],100.)
ax.errorbar(param_list,ml_classifiers_dict['nn'], yerr=binomial_error(ml_classifiers_dict['nn']), linestyle='-', marker='s', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[0], label=r'$ANN$',clip_on=False)
print("bdt : ", ml_classifiers_dict['bdt'])
ax.errorbar(param_list,ml_classifiers_dict['bdt'], yerr=binomial_error(ml_classifiers_dict['bdt']), linestyle='-', marker='o', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[1], label=r'$BDT$', clip_on=False)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_splits_dict[str(chi2_split)]=[]
chi2_best = []
for param in param_list:
chi2_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_splits_dict[str(chi2_split)].append(temp)
chi2_best_dim.append(temp)
temp_best = np.max(chi2_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_1D_splits_dict[str(chi2_split)]=[]
chi2_1D_best = []
for param in param_list:
chi2_1D_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_1D_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_1D_splits_dict[str(chi2_split)].append(temp)
chi2_1D_best_dim.append(temp)
temp_best = np.max(chi2_1D_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_1D_best.append(temp_best)
#print("chi2_best : ",chi2_best)
print("param_list : ",param_list)
print("chi2_best : ", chi2_best)
print("chi2_splits_dict : ", chi2_splits_dict)
ax.errorbar(param_list,chi2_best, yerr=binomial_error(chi2_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='black', label=r'$\chi^2$', clip_on=False)
ax.errorbar(param_list,chi2_1D_best, yerr=binomial_error(chi2_1D_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='blue', label=r'$\chi^2 1D$', clip_on=False)
print("ml_classifiers_dict : ",ml_classifiers_dict)
print("chi2_best : ", chi2_best)
#ax.plot((0.1365,0.1365),(0.,1.),c="grey",linestyle="--")
ax.set_xlim([0.,0.1])
#ax.set_xlim([0.129,0.1405])
ax.set_ylim([0.,1.])
ax.set_xlabel(r"$p_{signal}$")
ax.set_ylabel("Fraction rejected")
#a, b, c = [0.130,0.133], [0.1365],[0.14]
#ax.set_xticks(a+b+c)
#xx, locs = plt.xticks()
#ll = ['%.3f' % y for y in a] + ['%.4f' % y for y in b] + ['%.3f' % y for y in c]
#plt.xticks(xx, ll)
#ax.legend(loc='lower left', frameon=False, numpoints=1)
fig_leg = plt.figure(figsize=(8,2.7))
ax_leg = fig_leg.add_axes([0.0,0.0,1.0,1.0])
plt.tick_params(axis='x',which='both',bottom='off', top='off', labelbottom='off')
plt.tick_params(axis='y',which='both',bottom='off', top='off', labelbottom='off')
ax_leg.yaxis.set_ticks_position('none')
ax_leg.set_frame_on(False)
plt.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left',frameon=False, numpoints=1,ncol=2)
fig_leg.savefig("S-4mu_WigD_updated3_analysis_legend.pdf")
#fig_name=name+"_alphaSvalue_analysis"
fig_name="S-4mu_WigD_updated3_analysis"
fig.savefig(fig_name+".pdf")
fig.savefig(fig_name+"_"+time.strftime("%b_%d_%Y")+".pdf")
print("Saved the figure as" , fig_name+".pdf")
| mit |
avistous/QSTK | Tools/DataGenerate_SineWave.py | 2 | 2050 |
import datetime as dt
import csv
import copy
import os
import pickle
import math
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# QSTK imports
from qstkutil import qsdateutil as du
import qstkutil.DataEvolved as de
def write(ls_symbols, d_data, ldt_timestamps):
ldt_timestamps.reverse()
ls_keys = ['actual_open', 'actual_high', 'actual_low', 'actual_close', 'volume', 'close']
length = len(ldt_timestamps)
for symbol in ls_symbols:
sym_file = open('./' + symbol + '.csv', 'w')
sym_file.write("Date,Open,High,Low,Close,Volume,Adj Close \n")
for i,date in enumerate(ldt_timestamps):
date_to_csv = '{:%Y-%m-%d}'.format(date)
string_to_csv = date_to_csv
for key in ls_keys:
string_to_csv = string_to_csv + ',' + str(d_data[symbol][length-i-1])
string_to_csv = string_to_csv + '\n'
sym_file.write(string_to_csv)
def main():
print "Creating Stock data from Sine Waves"
dt_start = dt.datetime(2000, 1, 1)
dt_end = dt.datetime(2012, 10, 31)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
x = np.array(range(len(ldt_timestamps)))
ls_symbols = ['SINE_FAST', 'SINE_SLOW', 'SINE_FAST_NOISE', 'SINE_SLOW_NOISE']
sine_fast = 10*np.sin(x/10.) + 100
sine_slow = 10*np.sin(x/30.) + 100
sine_fast_noise = 10*(np.sin(x/10.) + np.random.randn(x.size)) + 100
sine_slow_noise = 10*(np.sin(x/30.) + np.random.randn(x.size)) + 100
d_data = dict(zip(ls_symbols, [sine_fast, sine_slow, sine_fast_noise, sine_slow_noise]))
write(ls_symbols, d_data, ldt_timestamps)
plt.clf()
plt.plot(ldt_timestamps, sine_fast)
plt.plot(ldt_timestamps, sine_slow)
plt.plot(ldt_timestamps, sine_fast_noise)
plt.plot(ldt_timestamps, sine_slow_noise)
plt.ylim(50,150)
plt.xticks(size='xx-small')
plt.legend(ls_symbols, loc='best')
plt.savefig('test.png',format='png')
if __name__ == '__main__':
main() | bsd-3-clause |
tensorflow/minigo | oneoffs/embeddings_graphs.py | 8 | 3394 | #!/usr/bin/env python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import subprocess
import time
from absl import app, flags
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from tqdm import tqdm
flags.DEFINE_string('embedding_file', None, 'Where to save the embeddings.')
flags.DEFINE_integer(
'pca_dims', None,
help='None to skip PCA, else number of dimensions to reduce to')
flags.DEFINE_bool(
'produce_pngs', False,
help='if true call sgftopng for all positions')
flags.mark_flag_as_required('embedding_file')
flags.register_validator(
'embedding_file',
lambda ef: ef.endswith('.pickle') and os.path.isfile(ef),
'embedding_file must be an existing .pickle file')
FLAGS = flags.FLAGS
def main(argv):
t0 = time.time()
embedding_file = FLAGS.embedding_file
with open(embedding_file, 'rb') as pickle_file:
metadata, embeddings = pickle.load(pickle_file)
t1 = time.time()
reduced = embeddings
if FLAGS.pca_dims:
pca = PCA(n_components=FLAGS.pca_dims)
pca_result = pca.fit_transform(embeddings)
print('Explained variation per principal component:')
print(pca.explained_variance_ratio_)
print('Total Explained: {:.4f}'.format(
sum(pca.explained_variance_ratio_)))
print()
reduced = pca_result
t2 = time.time()
print('Shape:', reduced.shape)
tsne = TSNE(
n_components=2,
verbose=4,
perplexity=40,
n_iter=2000,
min_grad_norm=5e-5)
coords = tsne.fit_transform(reduced)
assert len(coords.shape) == 2, coords.shape[1] == 2
# scale coords to be [0,1] in both dims
coords -= [min(coords[:, 0]), min(coords[:, 1])]
coords /= max(coords.flatten())
t3 = time.time()
for i, (path, move) in enumerate(tqdm(metadata)):
assert path.endswith('.sgf'), path
png = '{}_{}.png'.format(path[:-4], move)
assert '/eval/' in png, png
png = png.replace('/eval/', '/thumbnails/')
if FLAGS.produce_pngs and not os.path.exists(png):
# NOTE: sgftopng is a pain to install, sorry.
with open(path) as sgf_file:
subprocess.run(
['sgftopng', png, '-' + str(move + 1)],
stdin=sgf_file)
metadata[i] = (path, move, png)
t4 = time.time()
print('Read {:.2f}s, PCA {:.2f}s t-SNE {:.2f}s, PNGs {:.2f}s'.format(
t1 - t0, t2 - t1, t3 - t2, t4 - t3))
new_file = embedding_file.replace('.pickle', '.graph.pickle')
assert new_file != embedding_file, (new_file, embedding_file)
with open(new_file, 'wb') as pickle_file:
pickle.dump([metadata, embeddings, coords], pickle_file)
print('TSNE cords added to', new_file)
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
silky/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModelFrame.py | 22 | 2075 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris, resample
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
stocf = .2
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
mYst = resample(np.maximum(-200, mXr), mXr.size*stocf) # decimate the mag spectrum
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(2,1,1)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-100,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', lw=2, markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(2,1,2)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.6, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.0, label='mXr')
binFreq = (fs/2.0)*np.arange(mYst.size)/(mYst.size)
plt.plot(binFreq,mYst,'r', lw=1.5, label='mYst')
plt.axis([0,maxplotfreq,-100,max(mYh)+2])
plt.legend(prop={'size':15})
plt.title('mYh + mXr + mYst')
plt.tight_layout()
plt.savefig('hpsModelFrame.png')
plt.show()
| agpl-3.0 |
simongibbons/numpy | doc/source/reference/random/performance.py | 14 | 2599 | from collections import OrderedDict
from timeit import repeat
import pandas as pd
import numpy as np
from numpy.random import MT19937, PCG64, Philox, SFC64
PRNGS = [MT19937, PCG64, Philox, SFC64]
funcs = OrderedDict()
integers = 'integers(0, 2**{bits},size=1000000, dtype="uint{bits}")'
funcs['32-bit Unsigned Ints'] = integers.format(bits=32)
funcs['64-bit Unsigned Ints'] = integers.format(bits=64)
funcs['Uniforms'] = 'random(size=1000000)'
funcs['Normals'] = 'standard_normal(size=1000000)'
funcs['Exponentials'] = 'standard_exponential(size=1000000)'
funcs['Gammas'] = 'standard_gamma(3.0,size=1000000)'
funcs['Binomials'] = 'binomial(9, .1, size=1000000)'
funcs['Laplaces'] = 'laplace(size=1000000)'
funcs['Poissons'] = 'poisson(3.0, size=1000000)'
setup = """
from numpy.random import {prng}, Generator
rg = Generator({prng}())
"""
test = "rg.{func}"
table = OrderedDict()
for prng in PRNGS:
print(prng)
col = OrderedDict()
for key in funcs:
t = repeat(test.format(func=funcs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
col = pd.Series(col)
table[prng().__class__.__name__] = col
npfuncs = OrderedDict()
npfuncs.update(funcs)
npfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype="uint32",size=1000000)'
npfuncs['64-bit Unsigned Ints'] = 'randint(2**64,dtype="uint64",size=1000000)'
setup = """
from numpy.random import RandomState
rg = RandomState()
"""
col = {}
for key in npfuncs:
t = repeat(test.format(func=npfuncs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
table['RandomState'] = pd.Series(col)
columns = ['MT19937','PCG64','Philox','SFC64', 'RandomState']
table = pd.DataFrame(table)
order = np.log(table).mean().sort_values().index
table = table.T
table = table.reindex(columns)
table = table.T
table = table.reindex([k for k in funcs], axis=0)
print(table.to_csv(float_format='%0.1f'))
rel = table.loc[:, ['RandomState']].values @ np.ones(
(1, table.shape[1])) / table
rel.pop('RandomState')
rel = rel.T
rel['Overall'] = np.exp(np.log(rel).mean(1))
rel *= 100
rel = np.round(rel)
rel = rel.T
print(rel.to_csv(float_format='%0d'))
# Cross-platform table
rows = ['32-bit Unsigned Ints','64-bit Unsigned Ints','Uniforms','Normals','Exponentials']
xplat = rel.reindex(rows, axis=0)
xplat = 100 * (xplat / xplat.MT19937.values[:,None])
overall = np.exp(np.log(xplat).mean(0))
xplat = xplat.T.copy()
xplat['Overall']=overall
print(xplat.T.round(1))
| bsd-3-clause |
arizona-phonological-imaging-lab/Autotrace | installer-scripts/python3/mac_autotrace_installer.py | 1 | 19896 | '''
Mac Autotrace Installer
This Python 3 script will automatically install all of the dependencies needed to run
the AutoTrace software from the APIL (Arizona Phonological Imaging Lab) on
a Macintosh computer.
Installs MacPorts and uses it to prepare the following dependencies:
python27
python-select
GTK
numpy
OpenCV
MatPlotLib
qt4
Also checks for matlab installation, but matlab is not required for installation
(though it is required to make AutoTrace run properly, ATM.)
Changes to be made:
qt stuff? (refer to the Autotrace readme)
possibly:
tk gui interface after install?
Needs Sudo!
By Gustave Powell and Trevor Sullivan
'''
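# Added usage note (not in the original script): run from a terminal with
# administrator rights, for example
#   sudo python3 mac_autotrace_installer.py
# (the interpreter name is an assumption; use whichever Python 3 is on PATH)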
import subprocess as sp
import re
import shlex
import urllib.request, urllib.parse, urllib.error
import os
import sys
import time
import webbrowser
import tkinter
class Installer(object):
def __init__(self):
self.user_home = os.path.expanduser("~/")
self.user_name = self.user_home.split("/")[-2]
self.profile_path = os.path.join(self.user_home,".bash_profile")
self.github_path = os.path.join(self.user_home,"github/")
self.downloads_folder = os.path.join(self.user_home, "Downloads/")
self.macports_dst = os.path.join(self.downloads_folder, "macports_installer.pkg")
self.version = self.check_OSX_version()
def exit_on_error(self):
print("Exiting...")
sys.exit(1)
def progress(self, process):
while process.poll() is None:
print(".")
time.sleep(3)
def prepare_installation(self):
self.find_matlab()
self.check_root()
self.check_xcode()
self.check_macports()
def install(self):
print("Updating macports database...")
p = sp.Popen(shlex.split("sudo port selfupdate"), stdout=sp.PIPE, stderr=sp.PIPE)
p = self.progress(p)
print("Upgrading outdated ports...")
o, e = sp.Popen(shlex.split("sudo port upgrade outdated"), stdout=sp.PIPE, stderr=sp.PIPE).communicate()
self.check_python()
self.check_qt4()
self.check_gtk()
self.check_numpy()
self.check_cv()
self.check_matplot()
self.check_git()
self.check_clone()
self.set_crontab()
print("Success!") #point user to ReadMe file or give URL to place on github (possibly even hyperlink?)
#tk window that says something like "install complete, click here for more info"
self.check_clone()
self.set_crontab()
self.after_install_gui()
'''
Check for matlab install and ask permission to continue
'''
def find_matlab(self):
Applications_folder, _ = sp.Popen(["ls", "/Applications"], stdout=sp.PIPE, stderr=sp.PIPE).communicate()
matlab_installed = Applications_folder.decode().find("MATLAB")
if matlab_installed > 0:
pass
else:
print("You don't seem to have MATLAB installed. Autotrace may not run properly without it.")
answer = input("Would you like to continue with the installation anyway? (y/n) ").lower()
if answer == "n":
self.exit_on_error()
elif answer == "y":
return
else:
print("Try again")
self.find_matlab()
'''
Checks for matlab version
'''
def check_matlab_version(self):
out, err = sp.Popen(shlex.split("matlab -nosplash -nodesktop -r 'exit'"), stdout=sp.PIPE, stderr=sp.PIPE, ).communicate()
if out:
print("You've got MATLAB already with the proper license")
if err:
print("You do not have the proper MATLAB license. Please check your install.")
def check_root(self):
"""
check if user has root privileges
"""
o, e = sp.Popen(["groups"], stderr = sp.PIPE, stdout = sp.PIPE, shell = True).communicate()
oa, e = sp.Popen(["whoami"], stderr = sp.PIPE, stdout = sp.PIPE, shell = True).communicate()
if "admin" not in o:
print("ERROR: You need administrative rights to complete the installation")
self.exit_on_error()
def check_OSX_version(self):
#find Mac OSX version
version_pattern = re.compile("([0-9.]+)")
version = sp.Popen("sw_vers", stdout=sp.PIPE, stderr=sp.PIPE)
out, _ = version.communicate()
version = out.decode().split('\n')
version = re.search(version_pattern, version[1]).group(1)
return version
def install_macports(self):
"""
Download appropriate version
and open installer
"""
#print("creating file: {0}".format(os.path.expanduser("~/.bash_profile")))
if not os.path.exists(self.profile_path):
print("bash_profile doesn't exist, creating it now")
open(self.profile_path, "w").close()
#sp.Popen(["sudo", "chown", "$USER", "~/.bash_profile"])
o, e =sp.Popen(["sudo", "chmod", "777", self.profile_path], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
print("chmod out {0}".format(o))
print("chmod err {0}".format(e))
else:
o, e = sp.Popen(["$PATH"], stderr=sp.PIPE, stdout=sp.PIPE, shell=True).communicate()
sys_path = os.environ['PATH']
if "/opt/local/bin" in sys_path and '/opt/local/sbin' in sys_path:
print("PATH is correct")
elif "/opt/local/bin" not in sys_path:
newfile = open(self.profile_path, "a")
newfile.write("\nexport PATH=/opt/local/bin:$PATH\n")
newfile.close()
os.environ['PATH'] = ":".join([sys_path, '/opt/local/bin'])
sys_path = os.environ['PATH']
elif "/opt/local/sbin" not in sys_path:
newfile = open(self.profile_path, "a")
newfile.write("\nexport PATH=/opt/local/sbin:$PATH\n")
newfile.close()
os.environ['PATH'] = ":".join([sys_path, '/opt/local/sbin'])
sys_path = os.environ['PATH']
else:
print("adding to PATH")
newfile = open(self.profile_path, "a")
# gotta look up the code for this part
newfile.write("\nexport PATH=/opt/local/bin:/opt/local/sbin:$PATH\n")
newfile.close()
o, e = sp.Popen(["sudo", "chmod", "777", self.profile_path], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
o, e = sp.Popen(["sudo", "chown", self.user_name, self.profile_path], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
if self.version.startswith("10.9"):
print("You're using Mavericks\n")
macports_src = "https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.9-Mavericks.pkg"
elif self.version.startswith("10.8"):
print("You're using Mountain Lion\n")
macports_src = "https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.8-MountainLion.pkg"
elif self.version.startswith("10.7"):
print("You're using Lion\n")
macports_src = "https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.7-Lion.pkg"
elif self.version.startswith("10.6"):
print("You're using Snow Leopard\n")
macports_src = "https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.6-SnowLeopard.pkg"
else:
print("Mac OSX version not recognized.")
self.exit_on_error()
#urllib.request.urlretrieve(macports_src, macports_dst)
f = urllib.urlopen(macports_src)
with open(self.macports_dst, "wb") as pkgFile:
pkgFile.write(f.read())
#open macports installer...
macports_installer = sp.Popen(shlex.split("open {pkg}".format(pkg=self.macports_dst)), stdout=sp.PIPE, stderr=sp.PIPE)
print("follow the instructions in the installer")
o, e = macports_installer.communicate()
raw_input("Press Enter after finishing MacPorts install")
#macports_installer.wait()
self.install()
def check_macports(self):
"""
Check to see if macport installed
"""
port_found, _ = sp.Popen(shlex.split("which port"), stdout=sp.PIPE, stderr=sp.PIPE).communicate()
print(port_found)
if not port_found:
print("We need to install Macports...Let's see what system you're using...")
self.install_macports()
else:
print("Macports already installed!")
self.install()
#see if xcode tools installed
def check_xcode(self):
p = sp.Popen(["pkgutil", "--pkg-info=com.apple.pkg.CLTools_Executables"], stdout=sp.PIPE, stderr=sp.PIPE)
o, e = p.communicate()
if "No receipt" in e:
print("Xcode not installed")
if self.version.startswith("10.9"):
try:
print("Installing XCode command line tools")
o, e = sp.Popen(shlex.split("xcode-select --install"), stdout=sp.PIPE, stderr=sp.PIPE).communicate()
raw_input("Press Enter after finishing Command Line Tools install")
except:
print("You need to install XCode tools! \n You'll find it in the app store.")
sys.exit()
else:
print("Please install XCode tools. ")
print("1. Install 'XCode' from the app store")
print("2. run Xcode")
print("3. go to menu > preferences > downloads")
print("4. select 'command line tools'")
self.exit_on_error()
else:
print("Xcode installed")
'''
try:
gcc_configured = sp.Popen(shlex.split("gcc --version"), stdout=sp.PIPE, stderr=sp.PIPE)
o, e = gcc_configured.communicate()
if "--prefix=/Applications/Xcode.app/Contents/Developer/usr" in e:
print("XCode command line tools appear to be installed.")
except:
'''
def get_installed_list(self):
port_installed, _ = sp.Popen(shlex.split("port installed"), stdout=sp.PIPE, stderr=sp.PIPE).communicate()
return port_installed
def check_python(self):
'''
Checks for python27 installation
'''
installed = self.get_installed_list()
if re.search(".*python27 [@0-9._]+? \(active\)", installed):
print("You have the correct version of Python installed (2.7)")
else:
print("You do not have the correct version of python installed. Installing Python 2.7 using MacPorts")
self.install_python()
def install_python(self):
p = sp.Popen(shlex.split("sudo port install python27"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
        out, err = p.communicate()
        if err:
            print(err)
            print("Python 2.7 install failed. (port install python27)")
            self.exit_on_error()
        print(out)
        print("Setting python27 as system default...")
        o, e = sp.Popen(shlex.split("sudo port select --set python python27"), stdout=sp.PIPE, stderr=sp.PIPE).communicate()
def check_qt4(self):
installed = self.get_installed_list()
pos = installed.find('qt4-mac')
if pos > 0:
print("You have QT4 installed")
else:
print("You do not have qt4 installed. Installing qt4 using MacPorts (this may take a while, be patient)")
self.install_qt4()
def install_qt4(self):
p = sp.Popen(shlex.split("sudo port install qt4-mac"), stdout=sp.PIPE, stderr=sp.PIPE)
        self.progress(p)
        out, err = p.communicate()
        if err:
            print(err)
            print("qt4 install failed. (port install qt4-mac)")
            self.exit_on_error()
        print(out)
        print("Setting qt4 as system default...")
        o, e = sp.Popen(shlex.split("sudo port select --set qt4 qt4-mac"), stdout=sp.PIPE, stderr=sp.PIPE).communicate()
def check_gtk(self):
'''
checks for gtk installation, if not found, installs using methods below
'''
installed = self.get_installed_list()
gtk_pos = installed.find('py27-pygtk ')
gtk_doc_pos = installed.find('gtk-doc ')
gtk2_pos = installed.find('gtk2 ')
gtk3_pos = installed.find('gtk3 ')
if gtk_pos > 0:
print("pyGTK already installed!")
else:
print("Installing pyGTK using MacPorts")
self.install_gtk()
'''
if gtk_doc_pos > 0:
print("gtk-doc already installed!")
else:
print("Installing gtk-doc using MacPorts")
self.install_gtkdoc()
'''
if gtk2_pos > 0:
print("gtk2 already installed!")
else:
print("Installing gtk2 using MacPorts")
self.install_gtk2()
if gtk3_pos > 0:
print("gtk3 already installed!")
else:
print("Installing gtk3 using MacPorts")
self.install_gtk3()
def install_gtk(self):
p = sp.Popen(shlex.split("sudo port install py27-pygtk"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("pygtk install failed. (port install py27-pygtk)")
print(err)
self.exit_on_error()
def install_gtkdoc(self):
p = sp.Popen(shlex.split("sudo port install gtk-doc"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("gtk-doc install failed. (port install gtk-doc)")
print(err)
self.exit_on_error()
def install_gtk2(self):
p = sp.Popen(shlex.split("sudo port install gtk2"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("gtk2 install failed. (port install gtk2)")
print(err)
self.exit_on_error()
def install_gtk3(self):
p = sp.Popen(shlex.split("sudo port install gtk3"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("gtk3 install failed. (port install gtk3)")
print(err)
self.exit_on_error()
def check_numpy(self):
'''
checks for numpy installation. If not found, installs with methods below
'''
installed = self.get_installed_list()
numpy_pos = installed.find("py27-numpy ")
if numpy_pos > 0:
print("Looks like we've got numpy already")
else:
print("Oh Noes! We need numpy. Installing with MacPorts")
self.install_numpy()
def install_numpy(self):
p = sp.Popen(shlex.split("sudo port install py27-numpy"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("numpy install failed. (port install py27-numpy)")
print(err)
self.exit_on_error()
def check_cv(self):
'''
take a guess
'''
installed = self.get_installed_list()
opencv_pos = installed.find("opencv ")
if opencv_pos > 0:
print("OpenCV is here")
else:
print("OpenCV is missing, time for MacPorts")
self.install_cv()
def install_cv(self):
p = sp.Popen(shlex.split("sudo port install opencv +qt4 +python27"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("OpenCV install failed. (port install opencv)")
print(err)
self.exit_on_error()
def check_matplot(self):
'''
yep
'''
installed = self.get_installed_list()
matplot_pos = installed.find("matplotlib")
if matplot_pos > 0:
print("Looks like we've got matplotlib over here")
else:
print("matplotlib is missing, installing with MacPorts")
self.install_matplot()
def install_matplot(self):
p = sp.Popen(shlex.split("sudo port install py27-matplotlib +gtk3 +gtk2 +qt4"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("Matplotlib install failed. (port install py27-matplotlib)")
print(err)
self.exit_on_error()
def check_git(self):
installed = self.get_installed_list()
matplot_pos = installed.find("git-core")
if matplot_pos > 0:
print("Looks like git is installed")
else:
print("git is missing, installing with MacPorts")
self.install_git()
def install_git(self):
p = sp.Popen(shlex.split("sudo port install git-core"), stdout=sp.PIPE, stderr=sp.PIPE)
self.progress(p)
output, err = p.communicate()
if not err:
return
else:
print("Git install failed. (port install git-core)")
print(err)
self.exit_on_error()
#github stuff
def check_clone(self):
if not os.path.exists(self.github_path):
print("github directory doesn't exist, creating it now")
print(self.github_path)
os.makedirs(self.github_path)
o, e = sp.Popen(["sudo", "chmod", "777", self.github_path], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
o, e = sp.Popen(["sudo", "chown", self.user_name, self.github_path], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
if not os.path.exists(os.path.join(self.github_path, "old/")):
print("Downloading Autotrace to ~/github/")
self.clone_autotrace_repo()
else:
print("Looks like you have autotrace installed already")
def clone_autotrace_repo(self):
p = sp.Popen(["sudo git clone https://github.com/jjberry/Autotrace.git {0}".format(self.github_path)], stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
self.progress(p)
o, e = p.communicate()
print("git clone output")
print(o)
print("git clone error")
print(e)
# may be unnecessary.
#if [failure because of non-empty repo]
# p = sp.Popen([shlex.split("rm -rf"), self.github_path], stderr=sp.PIPE, stdout=sp.PIPE)
# self.check_clone()
#updates
def set_crontab(self):
print("Creating UpdateAutotrace file in /usr/local")
f = open("/usr/local/UpdateAutotrace", 'w')
        f.write('#!/bin/bash\n\ncd /Users/$USER/github\n\nsudo git pull\n')
        # This will overwrite whatever's already there, but it shouldn't matter because the file
        # won't change. If we want we can add an if statement so that this doesn't happen if the file
        # exists and has what we want in it.
        f.close()
o, e = sp.Popen(["sudo", "chmod", "777", "/usr/local/UpdateAutotrace"], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
o, e = sp.Popen(["sudo", "chown", self.user_name, "/usr/local/UpdateAutotrace"], stderr=sp.PIPE, stdout=sp.PIPE).communicate()
print("Creating crontab job to run the /usr/local/UpdateAutotrace script every Sunday and Thursday at noon")
if not os.path.exists("/etc/crontab"):
open("/etc/crontab", "w").close()
with open('/etc/crontab', 'r+') as f:
contents = f.read()
if contents.find("automatically updates autotrace") > 0:
print("crontab job already set")
else:
f.write("\n 0 12 * * 0,4 /usr/local/UpdateAutotrace #This script automatically updates autotrace \n\t\
#on sundays and thursdays at noon\n")
#f.write("\n * * * * * echo 'test' >> /Users/apiladmin/Downloads/test.txt") <- uncomment to test cron
sp.Popen(["crontab", "/etc/crontab"]) #this refreshes crontab so that it starts counting
'''
This would work if we install the CronTab module, which would require some downloading, unpacking, and installing.
I figure it's best to just change the file we want directly.
try:
from crontab import CronTab
except:
https://pypi.python.org/packages/source/p/python-crontab/python-crontab-1.7.2.tar.gz
#unpack and cd into this archive
sp.Popen(["sudo python setup.py install"], stderr=sp.PIPE, stdout=sp.PIPE)
from crontab import CronTab
cron = new CronTab()
if cron.find_comment("automatically updates autotrace"):
return
else:
job = cron.new(command=/usr/local/UpdateAutotrace, comment="This script automatically updates autotrace on sundays \
and thursdays at noon")
job.setall('0 12 * * 0,4')
'''
def after_install_gui(self):
top = tkinter.Tk()
top.geometry('300x150')
top.title("AutoTrace")
label = tkinter.Label(top, text = "Welcome to Autotrace!",
font = 'Helvetica -18 bold')
label.pack(fill=tkinter.Y, expand=1)
folder = tkinter.Button(top, text='Click here to view Autotrace files',
command = lambda: (os.system("open "+self.github_path)))
folder.pack()
readme = tkinter.Button(top, text='ReadMe',
command = lambda: (os.system("open "+os.path.join(self.github_path, "README.md"))), activeforeground ='blue',
activebackground = 'green')
readme.pack()
apilsite = tkinter.Button(top, text='Look here for more info on the project',
command = lambda: (webbrowser.open("http://apil.arizona.edu")), activeforeground = 'purple',
activebackground = 'orange')
apilsite.pack()
quit = tkinter.Button(top, text='Quit',
command=top.quit, activeforeground='red',
activebackground='black')
quit.pack()
tkinter.mainloop()
def test_git(self):
self.after_install_gui()
if __name__ == '__main__':
installer = Installer()
installer.prepare_installation()
| mit |
kcavagnolo/astroML | astroML/correlation.py | 3 | 12069 | """
Tools for computing two-point correlation functions.
"""
import warnings
import numpy as np
from sklearn.neighbors import BallTree
from .utils import check_random_state
# Check if scikit-learn's two-point functionality is available.
# This was added in scikit-learn version 0.14
try:
from sklearn.neighbors import KDTree
sklearn_has_two_point = True
except ImportError:
import warnings
sklearn_has_two_point = False
def uniform_sphere(RAlim, DEClim, size=1):
"""Draw a uniform sample on a sphere
Parameters
----------
RAlim : tuple
select Right Ascension between RAlim[0] and RAlim[1]
units are degrees
DEClim : tuple
select Declination between DEClim[0] and DEClim[1]
size : int (optional)
the size of the random arrays to return (default = 1)
Returns
-------
RA, DEC : ndarray
the random sample on the sphere within the given limits.
arrays have shape equal to size.
"""
zlim = np.sin(np.pi * np.asarray(DEClim) / 180.)
z = zlim[0] + (zlim[1] - zlim[0]) * np.random.random(size)
DEC = (180. / np.pi) * np.arcsin(z)
RA = RAlim[0] + (RAlim[1] - RAlim[0]) * np.random.random(size)
return RA, DEC
def ra_dec_to_xyz(ra, dec):
"""Convert ra & dec to Euclidean points
Parameters
----------
ra, dec : ndarrays
    Returns
    -------
    x, y, z : ndarrays
"""
sin_ra = np.sin(ra * np.pi / 180.)
cos_ra = np.cos(ra * np.pi / 180.)
sin_dec = np.sin(np.pi / 2 - dec * np.pi / 180.)
cos_dec = np.cos(np.pi / 2 - dec * np.pi / 180.)
return (cos_ra * sin_dec,
sin_ra * sin_dec,
cos_dec)
def angular_dist_to_euclidean_dist(D, r=1):
"""convert angular distances to euclidean distances"""
return 2 * r * np.sin(0.5 * D * np.pi / 180.)
def two_point(data, bins, method='standard',
data_R=None, random_state=None):
"""Two-point correlation function
Parameters
----------
data : array_like
input data, shape = [n_samples, n_features]
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
data_R : array_like (optional)
if specified, use this as the random comparison sample
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr : ndarray
the estimate of the correlation function within each bin
shape = Nbins
"""
data = np.asarray(data)
bins = np.asarray(bins)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
n_samples, n_features = data.shape
Nbins = len(bins) - 1
# shuffle all but one axis to get background distribution
if data_R is None:
data_R = data.copy()
for i in range(n_features - 1):
rng.shuffle(data_R[:, i])
else:
data_R = np.asarray(data_R)
if (data_R.ndim != 2) or (data_R.shape[-1] != n_features):
raise ValueError('data_R must have same n_features as data')
factor = len(data_R) * 1. / len(data)
if sklearn_has_two_point:
# Fast two-point correlation functions added in scikit-learn v. 0.14
KDT_D = KDTree(data)
KDT_R = KDTree(data_R)
counts_DD = KDT_D.two_point_correlation(data, bins)
counts_RR = KDT_R.two_point_correlation(data_R, bins)
else:
warnings.warn("Version 0.3 of astroML will require scikit-learn "
"version 0.14 or higher for correlation function "
"calculations. Upgrade to sklearn 0.14+ now for much "
"faster correlation function calculations.")
BT_D = BallTree(data)
BT_R = BallTree(data_R)
counts_DD = np.zeros(Nbins + 1)
counts_RR = np.zeros(Nbins + 1)
for i in range(Nbins + 1):
counts_DD[i] = np.sum(BT_D.query_radius(data, bins[i],
count_only=True))
counts_RR[i] = np.sum(BT_R.query_radius(data_R, bins[i],
count_only=True))
DD = np.diff(counts_DD)
RR = np.diff(counts_RR)
# check for zero in the denominator
RR_zero = (RR == 0)
RR[RR_zero] = 1
if method == 'standard':
corr = factor ** 2 * DD / RR - 1
elif method == 'landy-szalay':
if sklearn_has_two_point:
counts_DR = KDT_R.two_point_correlation(data, bins)
else:
counts_DR = np.zeros(Nbins + 1)
for i in range(Nbins + 1):
counts_DR[i] = np.sum(BT_R.query_radius(data, bins[i],
count_only=True))
DR = np.diff(counts_DR)
corr = (factor ** 2 * DD - 2 * factor * DR + RR) / RR
corr[RR_zero] = np.nan
return corr
def bootstrap_two_point(data, bins, Nbootstrap=10,
method='standard', return_bootstraps=False,
random_state=None):
"""Bootstrapped two-point correlation function
Parameters
----------
data : array_like
input data, shape = [n_samples, n_features]
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
Nbootstrap : integer
number of bootstrap resamples to perform (default = 10)
method : string
"standard" or "landy-szalay".
return_bootstraps: bool
if True, return full bootstrapped samples
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr, corr_err : ndarrays
the estimate of the correlation function and the bootstrap
error within each bin. shape = Nbins
"""
data = np.asarray(data)
bins = np.asarray(bins)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
if Nbootstrap < 2:
raise ValueError("Nbootstrap must be greater than 1")
n_samples, n_features = data.shape
# get the baseline estimate
corr = two_point(data, bins, method=method, random_state=rng)
bootstraps = np.zeros((Nbootstrap, len(corr)))
for i in range(Nbootstrap):
indices = rng.randint(0, n_samples, n_samples)
bootstraps[i] = two_point(data[indices, :], bins, method=method,
random_state=rng)
# use masked std dev in case of NaNs
corr_err = np.asarray(np.ma.masked_invalid(bootstraps).std(0, ddof=1))
if return_bootstraps:
return corr, corr_err, bootstraps
else:
return corr, corr_err
def two_point_angular(ra, dec, bins, method='standard', random_state=None):
"""Angular two-point correlation function
A separate function is needed because angular distances are not
euclidean, and random sampling needs to take into account the
spherical volume element.
Parameters
----------
ra : array_like
        input right ascension, shape = (n_samples,)
dec : array_like
input declination
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr : ndarray
the estimate of the correlation function within each bin
shape = Nbins
"""
ra = np.asarray(ra)
dec = np.asarray(dec)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if (ra.ndim != 1) or (dec.ndim != 1) or (ra.shape != dec.shape):
raise ValueError('ra and dec must be 1-dimensional '
'arrays of the same length')
n_features = len(ra)
Nbins = len(bins) - 1
# draw a random sample with N points
ra_R, dec_R = uniform_sphere((min(ra), max(ra)),
(min(dec), max(dec)),
2 * len(ra))
data = np.asarray(ra_dec_to_xyz(ra, dec), order='F').T
data_R = np.asarray(ra_dec_to_xyz(ra_R, dec_R), order='F').T
# convert spherical bins to cartesian bins
bins_transform = angular_dist_to_euclidean_dist(bins)
return two_point(data, bins_transform, method=method,
data_R=data_R, random_state=rng)
def bootstrap_two_point_angular(ra, dec, bins, method='standard',
Nbootstraps=10, random_state=None):
"""Angular two-point correlation function
A separate function is needed because angular distances are not
euclidean, and random sampling needs to take into account the
spherical volume element.
Parameters
----------
ra : array_like
        input right ascension, shape = (n_samples,)
dec : array_like
input declination
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
Nbootstraps : int
number of bootstrap resamples
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr : ndarray
the estimate of the correlation function within each bin
shape = Nbins
dcorr : ndarray
        error estimate on corr (sample standard deviation of
bootstrap resamples)
bootstraps : ndarray
The full sample of bootstraps used to compute corr and dcorr
"""
ra = np.asarray(ra)
dec = np.asarray(dec)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if (ra.ndim != 1) or (dec.ndim != 1) or (ra.shape != dec.shape):
raise ValueError('ra and dec must be 1-dimensional '
'arrays of the same length')
n_features = len(ra)
Nbins = len(bins) - 1
data = np.asarray(ra_dec_to_xyz(ra, dec), order='F').T
# convert spherical bins to cartesian bins
bins_transform = angular_dist_to_euclidean_dist(bins)
bootstraps = []
for i in range(Nbootstraps):
# draw a random sample with N points
ra_R, dec_R = uniform_sphere((min(ra), max(ra)),
(min(dec), max(dec)),
2 * len(ra))
data_R = np.asarray(ra_dec_to_xyz(ra_R, dec_R), order='F').T
if i > 0:
# random sample of the data
ind = np.random.randint(0, data.shape[0], data.shape[0])
data_b = data[ind]
else:
data_b = data
bootstraps.append(two_point(data_b, bins_transform, method=method,
data_R=data_R, random_state=rng))
bootstraps = np.asarray(bootstraps)
corr = np.mean(bootstraps, 0)
corr_err = np.std(bootstraps, 0, ddof=1)
return corr, corr_err, bootstraps
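# Minimal usage sketch (illustration only, not part of the astroML API): how the
# estimators above might be called on synthetic data. The sample sizes, bin edges
# and sky window below are arbitrary example values.
if __name__ == "__main__":
    rng = np.random.RandomState(0)

    # 3D two-point correlation on a small uniform point set
    X = rng.uniform(0, 100, size=(500, 3))
    bins = np.linspace(1, 20, 11)
    corr = two_point(X, bins, method='landy-szalay', random_state=0)
    print("landy-szalay xi(r): %s" % corr)

    # angular correlation with bootstrap errors on a random patch of sky
    ra, dec = uniform_sphere((0., 10.), (0., 10.), size=1000)
    theta_bins = np.linspace(0.1, 5.0, 8)
    w, w_err, _ = bootstrap_two_point_angular(ra, dec, theta_bins,
                                              method='landy-szalay',
                                              Nbootstraps=5, random_state=0)
    print("w(theta): %s" % w)
    print("w(theta) errors: %s" % w_err)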
| bsd-2-clause |
mojoboss/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
daodaoliang/bokeh | bokeh/sampledata/daylight.py | 45 | 2522 | """Daylight hours from http://www.sunrisesunset.com """
from __future__ import absolute_import
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
import pandas as pd
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>| |[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
| bsd-3-clause |
bundgus/python-playground | matplotlib-playground/examples/user_interfaces/rec_edit_gtk_custom.py | 1 | 1310 | """
generate an editable gtk treeview widget for record arrays with custom
formatting of the cells and show how to limit string entries to a list
of strings
"""
from __future__ import print_function
import gtk
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
import mpl_toolkits.gtktools as gtktools
datafile = cbook.get_sample_data('demodata.csv', asfileobj=False)
r = mlab.csv2rec(datafile, converterd={'weekdays': str})
formatd = mlab.get_formatd(r)
formatd['date'] = mlab.FormatDate('%Y-%m-%d')
formatd['prices'] = mlab.FormatMillions(precision=1)
formatd['gain'] = mlab.FormatPercent(precision=2)
# use a drop down combo for weekdays
stringd = dict(weekdays=['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
constant = ['clientid'] # block editing of this field
liststore = gtktools.RecListStore(r, formatd=formatd, stringd=stringd)
treeview = gtktools.RecTreeView(liststore, constant=constant)
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s' % (oldval, newval, liststore.r[rownum][colname]))
liststore.callbacks.connect('cell_changed', mycallback)
win = gtk.Window()
win.set_title('click to edit')
win.add(treeview)
win.show_all()
win.connect('delete-event', lambda *args: gtk.main_quit())
gtk.main()
| mit |
VU-Cog-Sci/PRF_experiment | exp_tools/Session.py | 1 | 19888 | #!/usr/bin/env python
# encoding: utf-8
"""
Session.py
Created by Tomas HJ Knapen on 2009-11-26.
Copyright (c) 2009 TK. All rights reserved.
"""
import os, sys, datetime
import subprocess, logging
import socket  # used by StarStimSession for the NIC trigger connection
import pickle, datetime, time
import scipy as sp
import numpy as np
# import matplotlib.pylab as pl
from math import *
from psychopy import visual, core, event, misc
# import VisionEgg
# VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
#
# from VisionEgg.Core import *
import pygame
from pygame.locals import *
from scipy.io import wavfile
import pyaudio, wave
from pylink import *
import pygaze
from pygaze import libscreen
from pygaze import eyetracker
from IPython import embed as shell
class Session(object):
"""Session is a main class that creates screen and file properties"""
def __init__(self, subject_initials, index_number):
super(Session, self).__init__()
self.subject_initials = subject_initials
self.index_number = index_number
self.setup_sound_system()
# pygame.mixer.init()
# os.chdir('sounds')
# self.sound_files = ['%d.wav' % i for i in range(3)] # subprocess.Popen('ls *.*', shell=True, stdout=subprocess.PIPE).communicate()[0].split('\n')[0:-1]
# self.sounds = [pygame.mixer.Sound(s) for s in self.sound_files]
# os.chdir('..')
self.clock = core.Clock()
self.outputDict = {'parameterArray': [], 'eventArray' : []}
self.events = []
self.stopped = False
def setup_sound_system(self):
"""initialize pyaudio backend, and create dictionary of sounds."""
self.pyaudio = pyaudio.PyAudio()
self.sound_files = subprocess.Popen('ls ' + os.path.join(os.environ['EXPERIMENT_HOME'], 'sounds', '*.wav'), shell=True, stdout=subprocess.PIPE).communicate()[0].split('\n')[0:-1]
self.sounds = {}
for sf in self.sound_files:
self.read_sound_file(file_name = sf)
# print self.sounds
def read_sound_file(self, file_name, sound_name = None):
"""Read sound file from file_name, and append to self.sounds with name as key"""
if sound_name == None:
sound_name = os.path.splitext(os.path.split(file_name)[-1])[0]
rate, data = wavfile.read(file_name)
# create stream data assuming 2 channels, i.e. stereo data, and use np.float32 data format
stream_data = data.astype(np.int16)
# check data formats - is this stereo sound? If so, we need to fix it.
wf = wave.open(file_name, 'rb')
# print sound_name
# print wf.getframerate(), wf.getnframes(), wf.getsampwidth(), wf.getnchannels()
if wf.getnchannels() == 2:
stream_data = stream_data[::2]
self.sounds.update({sound_name: stream_data})
def create_screen(self, size = (1280, 960), full_screen = False, background_color = (0.0,0.0,0.0),
gamma_scale = (2.475,2.25,2.15), physical_screen_size = (48, 32), physical_screen_distance = 71.0, max_lums = (24.52, 78.8, 10.19), wait_blanking = True, screen_nr = 0 ):
"""
create_screen take a screen to display the stimuli on.
the standard screen is assumed to be the Sony FW600, which is set up to start up in the
1280x960@96Hz resolution.
"""
# the actual screen-getting
self.display = libscreen.Display(disptype='psychopy', dispsize=size, fgc=(255,0,0), bgc=list((255*bgl for bgl in background_color)), screennr=screen_nr,mousevisible=False)
# self.pygaze_scr = libscreen.Screen(disptype='psychopy')
# print dir(self.display)
# print self.display
# print dir(self.pygaze_scr)
# print dir(self.pygaze_scr.screen[0])
self.screen = pygaze.expdisplay
self.screen.setMouseVisible(False)
self.screen.setColor(background_color)
# self.screen = visual.Window( size = size, fullscr = full_screen, allowGUI = False, units = 'pix', allowStencil = True, rgb = background_color, waitBlanking = wait_blanking, winType = 'pyglet' )
self.screen.background_color = background_color
# worked = pygame.display.set_gamma(gamma_scale[0],gamma_scale[1],gamma_scale[2])
self.screen_pix_size = size
self.max_lums = max_lums
self.physical_screen_size = physical_screen_size
self.physical_screen_distance = physical_screen_distance
self.screen_height_degrees = 2.0 * 180.0/pi * atan((physical_screen_size[1]/2.0)/physical_screen_distance)
self.pixels_per_degree = (size[1]) / self.screen_height_degrees
self.centimeters_per_degree = physical_screen_size[1] / self.screen_height_degrees
self.pixels_per_centimeter = self.pixels_per_degree / self.centimeters_per_degree
# print 'screen: ' + str(self.screen_height_degrees) + ' degrees tall and pixels per degree: ' + str(self.pixels_per_degree)
# self.screen.mousevis = False
self.screen.flip()
def create_output_file_name(self, data_directory = 'data'):
"""create output file"""
now = datetime.datetime.now()
opfn = now.strftime("%Y-%m-%d_%H.%M.%S")
if not os.path.isdir(data_directory):
os.mkdir(data_directory)
self.output_file = os.path.join(data_directory, self.subject_initials + '_' + str(self.index_number) + '_' + opfn )
def open_input_file(self):
"""
This method opens a pickle file that has input data in it.
we assume the input data consists of two arrays - one for parameters and one for timings.
the two arrays' rows will be trials.
"""
self.input_file_name = self.index_number + '.pickle'
ipf = open(self.input_file_name)
self.input_data = pickle.load(ipf)
ipf.close()
def create_input_data(self, save = False):
"""
This method should be provided by subclasses that create input data on the fly
"""
pass
def parse_input_data(self):
"""
We assume that the pickle file used as input will be an array,
the rows of which will be the requested trials.
"""
self.nr_trials = len(self.input_data)
def close(self):
"""close screen and save data"""
pygame.mixer.quit()
self.screen.close()
parsopf = open(self.output_file + '_outputDict.pickle', 'a')
pickle.dump(self.outputDict,parsopf)
parsopf.close()
def play_sound(self, sound_index = '0'):
"""docstring for play_sound"""
if type(sound_index) == int:
sound_index = str(sound_index)
# assuming 44100 Hz, mono channel np.int16 format for the sounds
stream_data = self.sounds[sound_index]
self.frame_counter = 0
def callback(in_data, frame_count, time_info, status):
data = stream_data[self.frame_counter:self.frame_counter+frame_count]
self.frame_counter += frame_count
return (data, pyaudio.paContinue)
# open stream using callback (3)
stream = self.pyaudio.open(format=pyaudio.paInt16,
channels=1,
rate=44100,
output=True,
stream_callback=callback)
stream.start_stream()
# stream.write(stream_data)
def play_np_sound(self, sound_array):
# assuming 44100 Hz, mono channel np.int16 format for the sounds
self.frame_counter = 0
def callback(in_data, frame_count, time_info, status):
data = sound_array[self.frame_counter:self.frame_counter+frame_count]
self.frame_counter += frame_count
return (data, pyaudio.paContinue)
# open stream using callback (3)
stream = self.pyaudio.open(format=pyaudio.paInt16,
channels=1,
rate=44100,
output=True,
stream_callback=callback)
stream.start_stream()
class EyelinkSession(Session):
"""docstring for EyelinkSession"""
def __init__(self, subject_initials, index_number):
super(EyelinkSession, self).__init__(subject_initials, index_number)
def create_tracker(self, tracker_on = True, sensitivity_class = 0, split_screen = False, screen_half = 'L', auto_trigger_calibration = 1, calibration_type = 'HV9', sample_rate = 1000):
"""
tracker sets up the connection and inputs the parameters.
only start tracker after the screen is taken, its parameters are set,
and output file names are created.
"""
self.eyelink_temp_file = self.subject_initials[:2] + '_' + str(self.index_number) + '_' + str(np.random.randint(99)) + '.edf'
# self.tracker.openDataFile(self.eyelink_temp_file)
if tracker_on:
# create actual tracker
try:
# self.tracker = EyeLink()
# shell()
self.tracker = eyetracker.EyeTracker(self.display, trackertype='eyelink', resolution=self.display.dispsize, data_file=self.eyelink_temp_file, bgc=self.display.bgc)
self.tracker_on = True
except:
print '\ncould not connect to tracker'
self.tracker = None
self.tracker_on = False
self.eye_measured, self.sample_rate, self.CR_mode, self.file_sample_filter, self.link_sample_filter = 'N', sample_rate, 1, 1, 1
return
else:
# not even create dummy tracker
self.tracker = None
self.tracker_on = False
return
self.apply_settings(sensitivity_class = sensitivity_class, split_screen = split_screen, screen_half = screen_half, auto_trigger_calibration = auto_trigger_calibration, calibration_type = calibration_type, sample_rate = sample_rate)
def apply_settings(self, sensitivity_class = 0, split_screen = False, screen_half = 'L', auto_trigger_calibration = True, sample_rate = 1000, calibration_type = 'HV9', margin = 60):
# set EDF file contents
self.tracker.send_command("file_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,MESSAGE,BUTTON")
# self.tracker.send_command("file_sample_filter = LEFT,RIGHT,GAZE,SACCADE,BLINK,MESSAGE,AREA")#,GAZERES,STATUS,HTARGET")
self.tracker.send_command("file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS,HTARGET")
        # set link data (used for gaze cursor)
self.tracker.send_command("link_event_filter = LEFT,RIGHT,FIXATION,FIXUPDATE,SACCADE,BLINK")
self.tracker.send_command("link_sample_data = GAZE,GAZERES,AREA,HREF,PUPIL,STATUS")
self.tracker.send_command("link_event_data = GAZE,GAZERES,AREA,HREF,VELOCITY,FIXAVG,STATUS")
        # set further info
self.tracker.send_command("screen_pixel_coords = 0 0 %d %d" %self.screen_pix_size)
self.tracker.send_command("pupil_size_diameter = %s"%('YES'));
self.tracker.send_command("heuristic_filter %d %d"%([1, 0][sensitivity_class], 1))
self.tracker.send_command("sample_rate = %d" % sample_rate)
        # settings that address saccade sensitivity - set via the sensitivity_class parameter: 0 is cognitive style, 1 is pursuit/neurological style
self.tracker.send_command("saccade_velocity_threshold = %d" %[30, 22][sensitivity_class])
self.tracker.send_command("saccade_acceleration_threshold = %d" %[9500, 5000][sensitivity_class])
self.tracker.send_command("saccade_motion_threshold = %d" %[0.15, 0][sensitivity_class])
# self.tracker.send_command("file_sample_control = 1,0,0")
self.tracker.send_command("screen_phys_coords = %d %d %d %d" %(-self.physical_screen_size[0] / 2.0, self.physical_screen_size[1] / 2.0, self.physical_screen_size[0] / 2.0, -self.physical_screen_size[1] / 2.0))
self.tracker.send_command("simulation_screen_distance = " + str(self.physical_screen_distance))
if auto_trigger_calibration:
self.tracker.send_command("enable_automatic_calibration = YES")
else:
self.tracker.send_command("enable_automatic_calibration = NO")
# for binocular stereo-setup need to adjust the calibration procedure to sample only points on the left/right side of the screen. This allows only HV9 calibration for now.
# standard would be:
        # self.tracker.send_command("calibration_targets = 320,240 320,40 320,440 40,240 600,240 40,40 600,40, 40,440 600,440")
# ordering of points:
# ;; FOR 9-SAMPLE ALGORITHM:
# ;; POINTS MUST BE ORDERED ON SCREEN:
# ;; 5 1 6
# ;; 3 0 4
# ;; 7 2 8
# ;; ordering for points in bicubic ("HV13", 13 pt) cal
# ;; Point order: 6 2 7
# ;; 10 11
# ;; 4 1 5
# ;; 12 13
# ;; 8 3 9
if split_screen:
self.tracker.send_command("calibration_type = HV9")
self.tracker.send_command("generate_default_targets = NO")
sh, nsw = self.screen.size[1], self.screen.size[0]/2
points = np.array([[nsw/2, sh/2], [nsw/2, sh-margin], [nsw/2, margin], [margin, sh/2], [nsw-margin, sh/2], [margin, sh - margin], [nsw - margin, sh - margin], [margin, margin], [nsw - margin, margin]])
if screen_half == 'R':
points[:,0] += nsw
points_string = ''
for p in points:
points_string += "%s,%s " % tuple(p)
points_string = points_string[:-1] # delete last space
self.tracker.send_command("calibration_targets = " % points_string)
self.tracker.send_command("validation_targets = " % points_string)
else:
self.tracker.send_command("calibration_type = " + calibration_type)
def tracker_setup(self, sensitivity_class = 0, split_screen = False, screen_half = 'L', auto_trigger_calibration = True, calibration_type = 'HV9', sample_rate = 1000):
if self.tracker.connected():
self.tracker.calibrate()
# re-set all the settings to be sure of sample rate and filter and such that may have been changed during the calibration procedure and the subject pressing all sorts of buttons
self.apply_settings(sensitivity_class = sensitivity_class, split_screen = split_screen, screen_half = screen_half, auto_trigger_calibration = auto_trigger_calibration, calibration_type = calibration_type, sample_rate = sample_rate )
# we'll record the whole session continuously and parse the data afterward using the messages sent to the eyelink.
self.tracker.start_recording()
# for that, we'll need the pixel size and the like.
self.tracker.log('degrees per pixel ' + str(self.pixels_per_degree) )
# now, we want to know how fast we're sampling, really
# self.eye_measured, self.sample_rate, self.CR_mode, self.file_sample_filter, self.link_sample_filter = self.tracker.getModeData()
self.sample_rate = sample_rate
def drift_correct(self, position = None):
"""docstring for drift_correct"""
if self.tracker.connected():
if position == None: # standard is of course centered on the screen.
position = [self.screen.size[0]/2,self.screen.size[1]/2]
while 1:
# Does drift correction and handles the re-do camera setup situations
error = self.tracker.doDriftCorrect(position[0],position[1],1,1)
if error != 27:
break;
else:
self.tracker_setup()
def eye_pos(self):
if self.tracker:
return self.tracker.sample() # check for new sample update
# if(dt != None):
# # Gets the gaze position of the latest sample,
# if dt.isRightSample():
# gaze_position = dt.getRightEye().getGaze()
# return gaze_position[0],gaze_position[1] # self.screen.size[1]-
# elif dt.isLeftSample():
# gaze_position = dt.getLeftEye().getGaze()
# return gaze_position[0],gaze_position[1] # self.screen.size[1]-
# return 0,self.screen.size[1]-0
else:
pygame.event.pump()
(x,y) = pygame.mouse.get_pos()
y = self.screen.size[1]-y
return x,y
def detect_saccade(self, algorithm_type = 'velocity', threshold = 0.25, direction = None, fixation_position = None, max_time = 1.0 ):
"""
detect_saccade tries to detect a saccade based on position (needs fixation_position argument) or velocity (perhaps a direction argument?) information.
It can be 'primed' with a vector giving the predicted direction of the impending saccade.
detect_saccade looks for a saccade between call_time (= now) and max_time+call_time
"""
no_saccade = True
start_time = core.getTime()
if algorithm_type == 'velocity':
sample_array = np.zeros((max_time * self.sample_rate, 2), dtype = np.float32)
velocity_array = np.zeros((max_time * self.sample_rate, 2), dtype = np.float32)
f = np.array([1,1,2,3], dtype = np.float32)/7.0
nr_samples = 1
sample_array[0,:] = self.eye_pos()
velocity_array[0,:] = 0.001, 0.001
if direction != None: # make direction a unit vector if it is an argument to this function
direction = direction / np.linalg.norm(direction)
while no_saccade:
saccade_polling_time = core.getTime()
sample_array[nr_samples][:] = self.eye_pos()
if (sample_array[nr_samples-1][0] != sample_array[nr_samples][0]) or (sample_array[nr_samples-1][1] != sample_array[nr_samples][1]):
velocity_array[nr_samples][:] = sample_array[nr_samples][:] - sample_array[nr_samples-1][:]
if nr_samples > 3:
# scale velocities according to x and y median-based standard deviations, as in engbert & mergenthaler, 2006
med_scaled_velocity = velocity_array[:nr_samples]/np.mean(np.sqrt(((velocity_array[:nr_samples] - np.median(velocity_array[:nr_samples], axis = 0))**2)), axis = 0)
if direction != None:
# scale the velocity array according to the direction in the direction argument before thresholding
# assuming direction is a x,y unit vector specifying the expected direction of the impending saccade
if np.inner(med_scaled_velocity[nr_samples], direction) > threshold:
no_saccade = False
if np.linalg.norm(med_scaled_velocity[-1]) > threshold:
no_saccade = False
nr_samples += 1
if ( saccade_polling_time - start_time ) > max_time:
no_saccade = False
if algorithm_type == 'position' or not self.tracker:
if fixation_position == None:
fixation_position = np.array(self.eye_pos())
while no_saccade:
saccade_polling_time = core.getTime()
ep = np.array(self.eye_pos())
# print ep, fixation_position, threshold, np.linalg.norm(ep - fixation_position) / self.pixels_per_degree
if (np.linalg.norm(ep - fixation_position) / self.pixels_per_degree) > threshold:
# eye position is outside the safe zone surrounding fixation - swap the buffers to change saccade target position
no_saccade = False
# print '\n'
if ( saccade_polling_time - start_time ) > max_time:
no_saccade = False
if algorithm_type == 'eyelink':
            while no_saccade:
                # wait_for_saccade_start blocks until the tracker reports a saccade onset
                self.tracker.wait_for_saccade_start()
                saccade_polling_time = core.getTime()
                no_saccade = False
return saccade_polling_time
def close(self):
if self.tracker:
if self.tracker.connected():
self.tracker.stop_recording()
# inject local file name into pygaze tracker and then close.
self.tracker.local_data_file = self.output_file + '.edf'
self.tracker.close()
super(EyelinkSession, self).close()
def play_sound(self, sound_index = '1'):
"""docstring for play_sound"""
super(EyelinkSession, self).play_sound(sound_index = sound_index)
if self.tracker != None:
self.tracker.log('sound ' + str(sound_index) + ' at ' + str(core.getTime()) )
class StarStimSession(EyelinkSession):
"""StarStimSession adds starstim EEG trigger functionality to the EyelinkSession.
It assumes an active recording, using NIC already connected over bluetooth.
Triggers land in the file that's already set up and recording.
"""
def __init__(self, subject_initials, index_number, connect_to_starstim = False, TCP_IP = '10.0.1.201', TCP_PORT = 1234):
super(StarStimSession, self).__init__(subject_initials, index_number)
self.setup_starstim_connection(TCP_IP = TCP_IP, TCP_PORT = TCP_PORT, connect_to_starstim = connect_to_starstim)
def setup_starstim_connection(self, TCP_IP = '10.0.1.201', TCP_PORT = 1234, connect_to_starstim = True):
"""setup_starstim_connection opens a connection to the starstim to its standard ip address
and standard (trigger) port. For controlling the recordings etc, we need tcp port 1235, it seems.
more on that later.
"""
if connect_to_starstim:
self.star_stim_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.star_stim_socket.connect((TCP_IP, TCP_PORT))
self.star_stim_connected = True
else:
self.star_stim_connected = False
def close_starstim_connection(self):
if self.star_stim_connected:
self.star_stim_socket.close()
def send_starstim_trigger(self, trigger = 1):
if self.star_stim_connected:
self.star_stim_socket.sendall('<TRIGGER>%i</TRIGGER>'%trigger)
def close(self):
super(StarStimSession, self).close()
if self.star_stim_connected:
self.close_starstim_connection()
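# Illustrative sketch (not part of the experiment code): the screen-geometry maths
# used in create_screen and the 44100 Hz mono int16 buffer format that
# play_np_sound expects. Screen size, viewing distance, tone frequency and
# duration are arbitrary example values.
if __name__ == '__main__':
    # degrees of visual angle spanned by the screen height, and pixels per degree
    example_size_pix = (1280, 960)
    example_screen_cm = (48.0, 32.0)
    example_distance_cm = 71.0
    screen_height_deg = 2.0 * 180.0 / pi * atan((example_screen_cm[1] / 2.0) / example_distance_cm)
    pix_per_deg = example_size_pix[1] / screen_height_deg
    print 'screen height (deg): %.2f, pixels per degree: %.2f' % (screen_height_deg, pix_per_deg)

    # a 0.5 s, 440 Hz tone in the format play_np_sound expects (mono np.int16 at 44100 Hz)
    sample_rate = 44100
    t = np.arange(0, 0.5, 1.0 / sample_rate)
    tone = (0.5 * np.iinfo(np.int16).max * np.sin(2 * np.pi * 440.0 * t)).astype(np.int16)
    print 'tone buffer: %s samples, dtype %s' % (tone.shape[0], tone.dtype)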
| mit |
jorik041/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
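    # Hedged usage sketch (illustrative only; X_train, y_train and X_test are
    # placeholder arrays that are not defined in this module):
    #
    #   >>> from sklearn.tree import DecisionTreeClassifier
    #   >>> clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
    #   >>> y_pred = clf.predict(X_test)   # predicted labels, shape (n_samples,)
    #   >>> leaves = clf.apply(X_test)     # leaf index reached by each sample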
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
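    # Hedged sketch of predict_proba output for a fitted single-output
    # classifier (illustrative only; clf and X_test are placeholders):
    #
    #   >>> proba = clf.predict_proba(X_test)
    #   >>> proba.shape          # (n_samples, n_classes)
    #   >>> proba.sum(axis=1)    # rows sum to 1: per-leaf class fractions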
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
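# Hedged sketch of the intended usage noted in the warning above: wrapping the
# extremely randomized tree in an ensemble (illustrative only; X and y are
# placeholder arrays, and BaggingClassifier is assumed to be importable from
# sklearn.ensemble):
#
#   >>> from sklearn.ensemble import BaggingClassifier
#   >>> from sklearn.tree import ExtraTreeClassifier
#   >>> bag = BaggingClassifier(ExtraTreeClassifier(random_state=0),
#   ...                         n_estimators=10, random_state=0).fit(X, y)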
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
hainm/statsmodels | statsmodels/genmod/_prediction.py | 27 | 9437 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
def __init__(self, predicted_mean, var_pred_mean, var_resid=None,
df=None, dist=None, row_labels=None, linpred=None, link=None):
# TODO: is var_resid used? drop from arguments?
self.predicted_mean = predicted_mean
self.var_pred_mean = var_pred_mean
self.df = df
self.var_resid = var_resid
self.row_labels = row_labels
self.linpred = linpred
self.link = link
if dist is None or dist == 'norm':
self.dist = stats.norm
self.dist_args = ()
elif dist == 't':
self.dist = stats.t
self.dist_args = (self.df,)
else:
self.dist = dist
self.dist_args = ()
@property
def se_obs(self):
raise NotImplementedError
return np.sqrt(self.var_pred_mean + self.var_resid)
@property
def se_mean(self):
return np.sqrt(self.var_pred_mean)
@property
def tvalues(self):
return self.predicted_mean / self.se_mean
def t_test(self, value=0, alternative='two-sided'):
'''z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : string
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution.
'''
# from statsmodels.stats.weightstats
# assumes symmetric distribution
stat = (self.predicted_mean - value) / self.se_mean
if alternative in ['two-sided', '2-sided', '2s']:
pvalue = self.dist.sf(np.abs(stat), *self.dist_args)*2
elif alternative in ['larger', 'l']:
pvalue = self.dist.sf(stat, *self.dist_args)
elif alternative in ['smaller', 's']:
pvalue = self.dist.cdf(stat, *self.dist_args)
else:
raise ValueError('invalid alternative')
return stat, pvalue
def conf_int(self, method='endpoint', alpha=0.05, **kwds):
"""
        Returns the confidence interval for the predicted mean.
        This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
currently ignored, only for compatibility, consistent signature
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
tmp = np.linspace(0, 1, 6)
is_linear = (self.link.inverse(tmp) == tmp).all()
if method == 'endpoint' and not is_linear:
ci_linear = self.linpred.conf_int(alpha=alpha, obs=False)
ci = self.link.inverse(ci_linear)
elif method == 'delta' or is_linear:
se = self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
ci = np.column_stack((lower, upper))
# if we want to stack at a new last axis, for lower.ndim > 1
# np.concatenate((lower[..., None], upper[..., None]), axis=-1)
return ci
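    # Illustrative note on the two branches above (not executed): the delta
    # method returns predicted_mean +/- q * se_mean with
    # q = dist.ppf(1 - alpha / 2), while the endpoint method builds the
    # interval on the linear predictor and maps it through link.inverse,
    # e.g. ci = exp(ci_linear) for a log link, which keeps the bounds inside
    # the valid range of the mean.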
def summary_frame(self, what='all', alpha=0.05):
# TODO: finish and cleanup
import pandas as pd
from statsmodels.compat.collections import OrderedDict
#ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha)
to_include = OrderedDict()
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
def get_prediction_glm(self, exog=None, transform=True, weights=None,
row_labels=None, linpred=None, link=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
           (weights.ndim != 1 or weights.shape[0] != exog.shape[0])):
raise ValueError('weights has wrong shape')
### end
    if pred_kwds is None:
        pred_kwds = {}
    pred_kwds['linear'] = False
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
link_deriv = self.model.family.link.inverse_deriv(linpred.predicted_mean)
var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
def params_transform_univariate(params, cov_params, link=None, transform=None,
row_labels=None):
"""
    results for univariate, nonlinear, monotonically transformed parameters
This provides transformed values, standard errors and confidence interval
for transformations of parameters, for example in calculating rates with
`exp(params)` in the case of Poisson or other models with exponential
mean function.
"""
from statsmodels.genmod.families import links
if link is None and transform is None:
link = links.Log()
if row_labels is None and hasattr(params, 'index'):
row_labels = params.index
params = np.asarray(params)
predicted_mean = link.inverse(params)
link_deriv = link.inverse_deriv(params)
var_pred_mean = link_deriv**2 * np.diag(cov_params)
# TODO: do we want covariance also, or just var/se
dist = stats.norm
# TODO: need ci for linear prediction, method of `lin_pred
linpred = PredictionResults(params, np.diag(cov_params), dist=dist,
row_labels=row_labels, link=links.identity())
res = PredictionResults(predicted_mean, var_pred_mean, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
return res
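# Hedged worked example for the rate transformation described above
# (illustrative values only): with a Poisson coefficient params = [0.5] and
# cov_params = [[0.04]], the default log link gives rate = exp(0.5) ~= 1.649
# and, via the delta method, se(rate) = exp(0.5) * sqrt(0.04) ~= 0.330.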
| bsd-3-clause |
matthew-brett/draft-statsmodels | scikits/statsmodels/sandbox/mle.py | 1 | 1652 | '''What's the origin of this file? It is not ours.
Does not run because of missing mtx files, now included.
Changes: JP corrections to imports so it runs, comment out print.
'''
import numpy as np
from numpy import dot, outer, random, argsort
from scipy import io, linalg, optimize
from scipy.sparse import speye
import matplotlib.pyplot as plt
def R(v):
rq = dot(v.T,A*v)/dot(v.T,B*v)
res = (A*v-rq*B*v)/linalg.norm(B*v)
data.append(linalg.norm(res))
return rq
def Rp(v):
""" Gradient """
result = 2*(A*v-R(v)*B*v)/dot(v.T,B*v)
#print "Rp: ", result
return result
def Rpp(v):
""" Hessian """
result = 2*(A-R(v)*B-outer(B*v,Rp(v))-outer(Rp(v),B*v))/dot(v.T,B*v)
#print "Rpp: ", result
return result
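# Background for the three functions above (added note): R(v) is the
# generalized Rayleigh quotient R(v) = (v' A v) / (v' B v), Rp(v) is its
# gradient 2 * (A*v - R(v)*B*v) / (v' B v), and Rpp(v) is the corresponding
# (approximate) Hessian. Minimizing R over v estimates the smallest
# generalized eigenvalue of (A, B), which is what the BFGS and NCG runs
# below do.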
A = io.mmread('nos4.mtx') # clustered eigenvalues
#B = io.mmread('bcsstm02.mtx.gz')
#A = io.mmread('bcsstk06.mtx.gz') # clustered eigenvalues
#B = io.mmread('bcsstm06.mtx.gz')
n = A.shape[0]
B = speye(n,n)
random.seed(1)
v_0=random.rand(n)
print "try fmin_bfgs"
full_output = 1
data=[]
v,fopt, gopt, Hopt, func_calls, grad_calls, warnflag, allvecs = \
optimize.fmin_bfgs(R,v_0,fprime=Rp,full_output=full_output,retall=1)
if warnflag == 0:
plt.semilogy(np.arange(0,len(data)),data)
print 'Rayleigh quotient BFGS',R(v)
print "fmin_bfgs OK"
print "try fmin_ncg"
#
# WARNING: the program may hangs if fmin_ncg is used
#
data=[]
v,fopt, fcalls, gcalls, hcalls, warnflag, allvecs = \
optimize.fmin_ncg(R,v_0,fprime=Rp,fhess=Rpp,full_output=full_output,retall=1)
if warnflag==0:
plt.figure()
plt.semilogy(np.arange(0,len(data)),data)
print 'Rayleigh quotient NCG',R(v)
| bsd-3-clause |
mattloper/opendr | opendr/test_sh.py | 1 | 4734 | #!/usr/bin/env python
# encoding: utf-8
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
from chumpy import Ch
import numpy as np
from chumpy.utils import row, col
from .lighting import SphericalHarmonics
import unittest
try:
import matplotlib.pyplot as plt
except:
from .dummy import dummy as plt
from .topology import loop_subdivider
visualize = False
def getcam():
from .camera import ProjectPoints3D
w = 640
h = 320
f = np.array([500,500])
rt = np.zeros(3)
t = np.zeros(3)
k = np.zeros(5)
c = np.array([w/2., h/2.])
near = .1
far = 20.
frustum = {'near': near, 'far': far, 'width': int(w), 'height': int(h)}
pp = ProjectPoints3D(f=f, rt=rt, t=t, k=k, c=c)
return pp, frustum
class TestSphericalHarmonics(unittest.TestCase):
def test_spherical_harmonics(self):
global visualize
if visualize:
plt.ion()
# Get mesh
v, f = get_sphere_mesh()
from .geometry import VertNormals
vn = VertNormals(v=v, f=f)
#vn = Ch(mesh.estimate_vertex_normals())
# Get camera
cam, frustum = getcam()
# Get renderer
from .renderer import ColoredRenderer
cam.v = v
cr = ColoredRenderer(f=f, camera=cam, frustum=frustum, v=v)
sh_red = SphericalHarmonics(vn=vn, light_color=np.array([1,0,0]))
sh_green = SphericalHarmonics(vn=vn, light_color=np.array([0,1,0]))
cr.vc = sh_red + sh_green
ims_baseline = []
for comp_idx, subplot_idx in enumerate([3,7,8,9,11,12,13,14,15]):
sh_comps = np.zeros(9)
sh_comps[comp_idx] = 1
sh_red.components = Ch(sh_comps)
sh_green.components = Ch(-sh_comps)
newim = cr.r.reshape((frustum['height'], frustum['width'], 3))
ims_baseline.append(newim)
if visualize:
plt.subplot(3,5,subplot_idx)
plt.imshow(newim)
plt.axis('off')
offset = row(.4 * (np.random.rand(3)-.5))
#offset = row(np.array([1.,1.,1.]))*.05
vn_shifted = (vn.r + offset)
vn_shifted = vn_shifted / col(np.sqrt(np.sum(vn_shifted**2, axis=1)))
vn_shifted = vn_shifted.ravel()
vn_shifted[vn_shifted>1.] = 1
vn_shifted[vn_shifted<-1.] = -1
vn_shifted = Ch(vn_shifted)
cr.replace(sh_red.vn, vn_shifted)
if True:
for comp_idx in range(9):
if visualize:
plt.figure(comp_idx+2)
sh_comps = np.zeros(9)
sh_comps[comp_idx] = 1
sh_red.components = Ch(sh_comps)
sh_green.components = Ch(-sh_comps)
pred = cr.dr_wrt(vn_shifted).dot(col(vn_shifted.r.reshape(vn.r.shape) - vn.r)).reshape((frustum['height'], frustum['width'], 3))
if visualize:
plt.subplot(1,2,1)
plt.imshow(pred)
plt.title('pred (comp %d)' % (comp_idx,))
plt.subplot(1,2,2)
newim = cr.r.reshape((frustum['height'], frustum['width'], 3))
emp = newim - ims_baseline[comp_idx]
if visualize:
plt.imshow(emp)
plt.title('empirical (comp %d)' % (comp_idx,))
pred_flat = pred.ravel()
emp_flat = emp.ravel()
nnz = np.unique(np.concatenate((np.nonzero(pred_flat)[0], np.nonzero(emp_flat)[0])))
if comp_idx != 0:
med_diff = np.median(np.abs(pred_flat[nnz]-emp_flat[nnz]))
med_obs = np.median(np.abs(emp_flat[nnz]))
if comp_idx == 4 or comp_idx == 8:
self.assertTrue(med_diff / med_obs < .6)
else:
self.assertTrue(med_diff / med_obs < .3)
if visualize:
plt.axis('off')
def get_sphere_mesh():
from .util_tests import get_earthmesh
mesh = get_earthmesh(np.zeros(3), np.zeros(3)) # load_mesh(filename)
v, f = mesh.v*64., mesh.f
for i in range(3):
mtx, f = loop_subdivider(v, f)
v = mtx.dot(v.ravel()).reshape((-1,3))
v /= 200.
v[:,2] += 2
return v, f
if __name__ == '__main__':
visualize = True
plt.ion()
#unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSphericalHarmonics)
unittest.TextTestRunner(verbosity=2).run(suite)
plt.show()
import pdb; pdb.set_trace()
| mit |
aasensio/pyiacsun | pyiacsun/util/fvoigt.py | 1 | 1842 | # Author: [email protected]
# Code: Translation of IDL file
# @vivivum: https://github.com/vivivum/MilosIDL/
# Author: D. Orozco Suarez & J.C. del Toro Iniesta
def fvoigt(damp,vv):
"""
    Compute the Voigt (H) and Faraday-Voigt (F) functions for a given
    damping parameter and wavelength axis.
INPUTS:
DAMP: A scalar with the damping parameter
VV: Wavelength axis usually in Doppler units.
OUTPUTS:
H: Voigt function
F: Faraday-Voigt function
NOTES:
A rational approximation to the complex error function is used
after Hui, Armstrong, and Wray(1978, JQSRT 19, 509). H and F are
the real and imaginary parts of such function, respectively.
The procedure is inspired on that in SIR (Ruiz Cobo & del Toro
Iniesta 1992, ApJ 398, 385). On its turn, that routine was taken
from modifications by A. Wittmann (1986) to modifications by S.K.
Solanki (1985) to an original FORTRAN routine written by J.W. Harvey
and A. Nordlund.
"""
import numpy as np
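    # A and B hold the numerator/denominator coefficients of the Hui,
    # Armstrong & Wray (1978) rational approximation to the complex error
    # function, w(z) ~= (A[6]*z**6 + ... + A[0]) / (z**7 + B[6]*z**6 + ... + B[0]),
    # evaluated below at z = damp - i*|vv|; then H = Re(w) and
    # F = sign(vv) * Im(w) / 2.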
A = [122.607931777104326, 214.382388694706425, 181.928533092181549,\
93.155580458138441, 30.180142196210589, 5.912626209773153,\
0.564189583562615]
B = [122.60793177387535, 352.730625110963558, 457.334478783897737,\
348.703917719495792, 170.354001821091472, 53.992906912940207,\
10.479857114260399,1.]
z = np.array(damp*np.ones(len(vv)) + -abs(vv)*1j)
Z = ((((((A[6]*z+A[5])*z+A[4])*z+A[3])*z+A[2])*z+A[1])*z+A[0])/\
(((((((z+B[6])*z+B[5])*z+B[4])*z+B[3])*z+B[2])*z+B[1])*z+B[0])
h = np.real(Z)
f = np.sign(vv)*np.imag(Z)*0.5
return [h,f]
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
uvals = np.linspace(-20., 20., 200)
a = 2.E-1
[h,f] = fvoigt(a,uvals)
plt.plot(uvals,f,'k-')
plt.plot(uvals,h,'r-')
plt.show()
| mit |
ngoix/OCRF | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
know as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient: the segmentation is then close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
QISKit/qiskit-sdk-py | test/python/visualization/test_gate_map.py | 1 | 5558 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A test for visualizing device coupling maps"""
import unittest
import os
from ddt import ddt, data
from qiskit.test.mock import FakeProvider
from qiskit.test import QiskitTestCase
from qiskit.visualization.gate_map import _GraphDist, plot_gate_map, plot_circuit_layout
from qiskit.tools.visualization import HAS_MATPLOTLIB
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.transpiler import Layout
from .visualization import path_to_diagram_reference, QiskitVisualizationTestCase
if HAS_MATPLOTLIB:
import matplotlib.pyplot as plt
@ddt
class TestGateMap(QiskitVisualizationTestCase):
""" visual tests for plot_gate_map """
backends = list(filter(lambda x:
not (x.configuration().simulator or x.configuration().n_qubits == 2),
FakeProvider().backends()))
@data(*backends)
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
def test_plot_gate_map(self, backend):
""" tests plotting of gate map of a device (20 qubit, 16 qubit, 14 qubit and 5 qubit)"""
n = backend.configuration().n_qubits
img_ref = path_to_diagram_reference(str(n) + "bit_quantum_computer.png")
filename = "temp.png"
fig = plot_gate_map(backend)
fig.savefig(filename)
self.assertImagesAreEqual(filename, img_ref, 0.2)
os.remove(filename)
@data(*backends)
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
def test_plot_circuit_layout(self, backend):
""" tests plot_circuit_layout for each device"""
layout_length = int(backend._configuration.n_qubits / 2)
qr = QuantumRegister(layout_length, 'qr')
circuit = QuantumCircuit(qr)
circuit._layout = Layout({qr[i]: i * 2 for i in range(layout_length)})
n = backend.configuration().n_qubits
img_ref = path_to_diagram_reference(str(n) + "_plot_circuit_layout.png")
filename = str(n) + "_plot_circuit_layout_result.png"
fig = plot_circuit_layout(circuit, backend)
fig.savefig(filename)
self.assertImagesAreEqual(filename, img_ref, 0.1)
os.remove(filename)
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
class TestGraphDist(QiskitTestCase):
""" tests _GraphdDist functions """
def setUp(self):
""" setup plots for _GraphDist """
ax1 = plt.subplots(figsize=(5, 5))[1]
ax2 = plt.subplots(figsize=(9, 3))[1]
ax1.axis("off")
ax2.axis("off")
self.ax1 = ax1
self.ax2 = ax2
self.ax1_x0, self.ax1_y0 = ax1.transAxes.transform((0, 0))
self.ax1_x1, self.ax1_y1 = ax1.transAxes.transform((1, 1))
self.ax2_x0, self.ax2_y0 = ax2.transAxes.transform((0, 0))
self.ax2_x1, self.ax2_y1 = ax2.transAxes.transform((1, 1))
self.ax1_bounds_x, self.ax1_bounds_y = ax1.get_xlim(), ax1.get_ylim()
self.ax2_bounds_x, self.ax2_bounds_y = ax2.get_xlim(), ax2.get_ylim()
self.size = 4
self.real_values = [self.ax1_x1 - self.ax1_x0, self.ax1_y1 - self.ax1_y0,
self.ax2_x1 - self.ax2_x0, self.ax2_y1 - self.ax2_y0]
self.abs_values = [self.ax1_bounds_x[0] - self.ax1_bounds_x[1],
self.ax1_bounds_y[0] - self.ax1_bounds_y[1],
self.ax2_bounds_x[0] - self.ax2_bounds_x[1],
self.ax2_bounds_y[0] - self.ax2_bounds_y[1]]
self.val = []
for i in range(4):
val = (self.size / self.real_values[i]) * self.abs_values[i]
self.val.append(val)
def test_dist_real(self):
""" tests dist_real calculation for different figsize """
params = [(self.ax1, self.real_values[0], True), (self.ax1, self.real_values[1], False),
(self.ax2, self.real_values[2], True), (self.ax2, self.real_values[3], False)]
for test_val, expected_val, bool_op in params:
with self.subTest():
self.assertEqual(expected_val, _GraphDist(self.size, test_val, bool_op).dist_real)
def test_dist_abs(self):
""" tests dist_abs calculation for different figsize """
params = [(self.ax1, self.abs_values[0], True), (self.ax1, self.abs_values[1], False),
(self.ax2, self.abs_values[2], True), (self.ax2, self.abs_values[3], False)]
for test_val, expected_val, bool_op in params:
with self.subTest():
self.assertEqual(expected_val, _GraphDist(self.size, test_val, bool_op).dist_abs)
def test_value(self):
""" tests value calculation for size = 4 """
params = [(self.ax1, self.val[0], True), (self.ax1, self.val[1], False),
(self.ax2, self.val[2], True), (self.ax2, self.val[3], False)]
for test_val, expected_val, bool_op in params:
with self.subTest():
self.assertEqual(expected_val, _GraphDist(self.size, test_val, bool_op).value)
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 |
rflamary/AstroImageReconsCNN | visu_images_rec.py | 1 | 2838 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 29 10:02:40 2016
@author: rflamary
"""
import sys
import numpy as np
import scipy as sp
import scipy.io as spio
import matplotlib.pylab as pl
import deconv
def get_fname(method,n,npsf,sigma,img):
return 'res/{}_{}x{}_PSF{}_sigma{:1.3f}_{}.mat'.format(method,n,n,npsf,sigma,img)
def get_fname_all(method,n,npsf,sigma):
return 'res/{}_{}x{}_PSF{}_sigma{:1.3f}_all.mat'.format(method,n,n,npsf,sigma)
# set seed
np.random.seed(1985)
if len(sys.argv)>1:
method=sys.argv[1]
else:
method='none'
#%% PSF
sigma=0.01
npsf=64
nr=5
cr=32
PSF=deconv.get_PSF_airy(npsf,nr)
npsf=64
#%%
lst_img=['M31','Hoag','M51a','M81','M101','M104']
#lst_img=['M31']
nb_img=len(lst_img)
lst_methods=['wiener','rl','vc_tv','cnn0','cnn']
nbm=len(lst_methods)
#%% generat
i=2
def sel(I):
return I[300:-cr-100,300:-cr-100]
img_txt=lst_img[i]
method='none'
I0=deconv.load_fits_image(img_txt)
n=1024
iv=1200;jv=1200
I0=I0[iv:iv+n,jv:jv+n]
fname=get_fname('none',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Inoise=data['Irec']
fname=get_fname('wiener',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Iwiener=data['Irec']
fname=get_fname('rl',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Irl=data['Irec']
fname=get_fname('vc_tv',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Itv=data['Irec']
fname=get_fname('cnn0',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Icnn0=data['Irec']
fname=get_fname('cnn',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Icnn=data['Irec']
#%% plot
szsb=[4,4]
pl.figure(1)
pl.clf()
fs=5
vmin=0.15
vmax=0.8
pl.subplot(szsb[0],szsb[1],1)
pl.imshow(sel(I0)/sel(I0).max(),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('Original',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],2)
pl.imshow(np.sqrt(PSF),cmap='gray',interpolation='nearest')
pl.title('PSF',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],3)
pl.imshow(sel(Inoise),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('Conv. + noisy',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],4)
pl.imshow(sel(Iwiener),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('Wiener',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],5)
pl.imshow(sel(Irl),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('RL',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],6)
pl.imshow(sel(Itv),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('Prox. TV',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],7)
pl.imshow(sel(Icnn0),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('1-CNN',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplot(szsb[0],szsb[1],8)
pl.imshow(sel(Icnn),cmap='gray',vmin=vmin,vmax=vmax)
pl.title('3-CNN',fontsize=fs,y=0.95)
pl.axis("off")
pl.subplots_adjust(wspace=0.0,hspace=0.15)
pl.savefig('imgs/images_rec.png',dpi=400,bbox_inches='tight',pad_inches=.01)
| mit |
boland1992/seissuite_iran | build/lib/ambient/azimuth/decluster.py | 4 | 8306 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 08:44:50 2015
@author: boland
"""
#------------------------------------------------------------------------------
# MODULES
#------------------------------------------------------------------------------
import os
import fiona
import pickle
import pyproj
import datetime
import itertools
import shapefile
import numpy as np
import datetime as dt
import pointshape as ps
from shapely import geometry
import multiprocessing as mp
import matplotlib.pyplot as plt
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from info_dataless import locs_from_dataless
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
#------------------------------------------------------------------------------
# VARIABLES
#------------------------------------------------------------------------------
verbose = False
#Enter path to boundary shape file.
shape_boundary = False
#shape_path = "/home/boland/Dropbox/University/UniMelb\
#/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
dataless = True
# Enter path to dataless file
#dataless_path = 'ALL_AUSTRALIA.870093.dataless'
#dataless_path = 'UOM.dataless'
# Enter number new stations desired.
N = 3
# Enter km spacing between path density points.
km_points = 20.0
# Reference elipsoid to calculate distance.
wgs84 = pyproj.Geod(ellps='WGS84')
# Enter number of bins for 2D Histogram density calculation.
nbins = 220
# Enter estimated average shear wave velocity. 3kms-1 is the default!
velocity = 3.0
# Define your ambient noise period range OR individual period in seconds.
global period_range
period_range = [1,40]
dataless_path = 'east-timor/timor.dataless'
dataless_path = '/storage/ANT/spectral_density/USARRAY/full_USARRAY.dataless'
coords = locs_from_dataless(dataless_path)
#shape_path = "/home/boland/Dropbox/University/UniMelb\
#/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
shape_path = 'east-timor/TLS_adm0.shp'
t0 = dt.datetime.now()
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
print type(UNIQUE_SHAPE)
# Generate InPoly class
INPOLY = InPoly(shape_path)
# Create matplotlib Path object from imported shapefile
#outer_shape = UNIQUE_SHAPE.buffer(1.,resolution=1)
#inner_shape = UNIQUE_SHAPE.buffer(-8,resolution=1)
#outer_poly = INPOLY.poly_from_shape(shape=outer_shape)
#inner_poly = INPOLY.poly_from_shape(shape=inner_shape)
#many_points = INPOLY.rand_poly(poly=outer_poly, N=1e4)
# Scale smaller shape to fit inside larger shape.
#SMALL_SHAPE = scale(UNIQUE_SHAPE, xfact=0.3, yfact=0.3)
#points_in_small_shape = INPOLY.rand_shape(shape=SMALL_SHAPE, IN=False)
# Generate matplotlib Path object for the small scalled polygon
#small_poly = INPOLY.node_poly(SHAPE.external_coords(shape=SMALL_SHAPE))
# Remove points that are outside the buffered_poly
#outer_poly_points = INPOLY.points_in(many_points, poly=outer_poly)
# Remove points that are inside the small_poly
#inner_poly_points = np.asarray(INPOLY.points_in(outer_poly_points,
# poly=inner_poly,
# IN=False))
#cluster_points = np.asarray(kmeans(inner_poly_points, 130)[0])
#plt.figure()
#plt.scatter(inner_poly_points[:,0], inner_poly_points[:,1], c='b')
#plt.scatter(cluster_points[:,0], cluster_points[:,1], c='orange', s=35)
#plt.show()
#-----------------------------------------------------------------------------
# INITIALISE CLASS STATES
#-----------------------------------------------------------------------------
GEODESIC = Geodesic()
COORDS = Coordinates()
INPOLY = InPoly(shape_path)
POLY_NODES = INPOLY.poly_nodes()
#-----------------------------------------------------------------------------
# GENERATE SECOND SET OF VARIABLES AND STATES
#-----------------------------------------------------------------------------
ideal_path = 'ideal_coordinates.pickle'
#if no paths have been done before, start afresh!
#if dataless:
# coords = locs_from_dataless(dataless_path)
# original_coords = coords
#elif os.path.exists(ideal_path):
# f = open(name=ideal_path, mode='rb')
# coords = pickle.load(f)
# f.close()
# decluster the points to desired specifications.
coords = COORDS.decluster(inputs=coords, degree_dist=0.5)
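# Hedged sketch of the kind of declustering assumed above (illustrative only;
# the actual Coordinates.decluster implementation is not shown in this file):
# greedily keep a station only if it lies at least degree_dist degrees from
# every station kept so far, e.g.
#
#   kept = []
#   for lon, lat in coords:
#       if all((lon - klon)**2 + (lat - klat)**2 >= 0.5**2
#              for klon, klat in kept):
#           kept.append((lon, lat))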
lonmin, lonmax = np.floor(min(coords[:,0])), np.ceil(max(coords[:,0]))
latmin, latmax = np.floor(min(coords[:,1])), np.ceil(max(coords[:,1]))
print lonmin,lonmax,latmin,latmax
plt.figure()
plt.scatter(coords[:,0], coords[:,1])
plt.show()
kappa = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
def spread_paths(coord_list):
return GEODESIC.fast_paths(coord_list)
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, kappa)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
counter, counter2 = 0, 0
#cd Desktop/Link\ to\ SIMULATIONS/Network_Tracks/smarter_model/
grad_ideal, grad_check1, grad_check2, H_avg1, H_avg2 = 0, 0, 0, 0, 0
SHAPE = (1,1)
perc_high = 0.01
low_counter = 0
random_counter = 0
#new_coord = 0
infinite_counter = 0
find_it = []
check_coord = None
use_old_path = False
searches_per_point = 3
factor = 0.05
cluster = False
N_cluster_points = False
while infinite_counter <= 1:
t0 = datetime.datetime.now()
#----------------------------------------------------------------------
# Generate N new point coordinates
#----------------------------------------------------------------------
if cluster:
new_coords = N_cluster_points
else:
new_coords = ps.points_in_shape(shape_path, N)
coords = np.append(coords, new_coords, axis=0)
coord_set = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, coord_set)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print "time to generate new paths", t1-t0
# Append new set of paths now that old set has been deleted.
#create a flattened numpy array of size 2xN from the paths created!
paths1 = GEODESIC.combine_paths(paths)
paths = list(paths)
paths1 = GEODESIC.remove_zeros(paths1)
DENSITY = Density(paths=paths1)
H, xedges, yedges = DENSITY.hist2d(paths=paths1)
grad = DENSITY.hgrad(H=H)
H_avg1 = np.average(H)
grad_check1 = np.std(grad)
H_masked = DENSITY.transform_h(H=H)
grad = DENSITY.transform_grad(grad=grad)
search = np.where(H<0.1*np.average(H))
Hmaxx, Hmaxy = search[1], search[0]
Hmaxx = (lonmax-lonmin)/(nbins) * Hmaxx + lonmin
Hmaxy = (latmax-latmin)/(nbins) * Hmaxy + latmin
# Make sure all low density coordinates ARE within shapefile!
low_density_coords = ps.paths_in_shape(np.column_stack((Hmaxx, Hmaxy)))
#N_cluster_points = kmeans(low_density_coords, N)[0]
density_coords = DENSITY.select_points()
# make sure that your density coords are within the boundary shape
density_coords = INPOLY.points_in(density_coords)
#cluster = True
if counter == 0:
grad_ideal = 1e6
avg_ideal = 0
if grad_check1 < grad_ideal and avg_ideal < H_avg1:
with open(u'ideal_coordinates.pickle', 'wb') as f:
print "\nExporting new ideal coordinates."
pickle.dump(coords, f, protocol=2)
DENSITY.plot_field()#nodes=POLY_NODES)#SHAPE=UNIQUE_SHAPE)
grad_ideal = grad_check1
avg_ideal = H_avg1
coords = COORDS.del_N(N=N, inputs=coords)
paths = COORDS.del_N(N=N, inputs=paths)
paths=list(paths)
counter+=1
t1 = datetime.datetime.now()
print "That loop took: ", t1-t0 | gpl-3.0 |
josherick/bokeh | bokeh/charts/builder/tests/test_scatter_builder.py | 33 | 2895 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Scatter
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestScatter(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)]
xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)]
xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [2, 3, 7, 5, 26]
y_pypy = [12, 23, 47, 15, 46]
y_jython = [22, 43, 10, 25, 26]
x_python = [1, 3, 4, 5, 8]
x_pypy = [1, 2, 4, 5, 8]
x_jython = [1, 2, 4, 6, 8]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Scatter, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['x_python'], x_python)
assert_array_equal(builder._data['x_jython'], x_jython)
assert_array_equal(builder._data['x_pypy'], x_pypy)
lvalues = [xyvalues['python'], xyvalues['pypy'], xyvalues['jython']]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Scatter, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
assert_array_equal(builder._data['x_0'], x_python)
assert_array_equal(builder._data['x_1'], x_pypy)
assert_array_equal(builder._data['x_2'], x_jython)
| bsd-3-clause |
fabioticconi/scikit-learn | sklearn/linear_model/sag.py | 29 | 11291 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
    The step size is set to 1 / (alpha_scaled + L + fit_intercept) for the
    squared loss and to 4 / (4 * alpha_scaled + L + fit_intercept) for the
    log loss, where L is the max squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
        criteria is not reached. Defaults to 1000.
tol: double, optional
        The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
JaggedG/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
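    # 1.349 is approximately the inter-quartile range of a standard normal
    # distribution, so iqr / 1.349 is a robust estimate of sigma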
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
kinverarity1/geotransect | feature_plot.py | 2 | 10843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for plotting.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import csv
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
from striplog import Legend
import utils
from notice import Notice
def get_curve_params(abbrev, fname):
"""
Builds and returns a dictionary of petrophysical parameters for
plotting purposes.
Args:
abbrev (str): A curve mnemonic or other abbreviation.
fname (str): The path to a file with the curve configuration.
Returns:
dict: A mapping of parameter:value for the curve in question.
"""
params = {'acronym': abbrev}
with open(fname, 'rU') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['acronymn'] == abbrev:
params['track'] = int(row['track'])
params['units'] = row['units']
params['xleft'] = float(row['xleft'])
params['xright'] = float(row['xright'])
params['logarithmic'] = row['logarithmic']
params['hexcolor'] = row['hexcolor']
params['fill_left_cond'] = bool(row['fill_left_cond'])
params['fill_left'] = row['fill_left']
params['fill_right_cond'] = bool(row['fill_right_cond'])
params['fill_right'] = row['fill_right']
params['xticks'] = row['xticks'].split(',')
return params
def plot_feature_well(tc, gs):
"""
Plotting function for the feature well.
Args:
tc (TransectContainer): The container for the main plot.
        gs (GridSpec): A matplotlib gridspec.
    Returns:
        GridSpec: The gridspec, with the feature well plot added.
"""
fname = tc.settings['curve_display']
logs = tc.log.get(tc.feature_well)
if not logs:
# There was no data for this well, so there won't be a feature plot.
Notice.fail("There's no well data for feature well " + tc.feature_well)
return gs
Z = logs.data['DEPT']
curves = ['GR', 'DT',
'DPHI_SAN',
'NPHI_SAN',
'DTS',
'RT_HRLT',
'RHOB',
'DRHO']
window = tc.settings.get('curve_smooth_window') or 51
ntracks = 5
lw = 1.0
smooth = True
naxes = 0
ncurv_per_track = np.zeros(ntracks)
if getattr(tc.log, 'striplog', None):
ncurv_per_track[0] = 1
for curve in curves:
naxes += 1
params = get_curve_params(curve, fname)
ncurv_per_track[params['track']] += 1
axss = plt.subplot(gs[2:, -5])
axs0 = [axss, axss.twiny()]
axs1 = [plt.subplot(gs[2:, -4])]
axs2 = [plt.subplot(gs[2:, -3])]
axs3 = [plt.subplot(gs[2:, -2])]
axs4 = [plt.subplot(gs[2:, -1])]
axs = [axs0, axs1, axs2, axs3, axs4]
if getattr(tc.log, 'striplog', None):
legend = Legend.default()
try:
logs.striplog[tc.log.striplog].plot_axis(axs0[0], legend=legend)
except KeyError:
# In fact, this striplog doesn't exist.
Notice.fail("There is no such striplog" + tc.log.striplog)
# And move on...
axs0[0].set_ylim([Z[-1], 0])
label_shift = np.zeros(len(axs))
for curve in curves:
try:
values = logs.data[curve]
except ValueError:
Notice.warning("Curve not present: "+curve)
values = np.empty_like(Z)
values[:] = np.nan
params = get_curve_params(curve, fname)
i = params['track']
j = 0
label_shift[i] += 1
linOrlog = params['logarithmic']
sxticks = np.array(params['xticks'])
xticks = np.array(sxticks, dtype=float)
whichticks = 'major'
if linOrlog == 'log':
midline = np.log(np.mean(xticks))
xpos = midline
whichticks = 'minor'
else:
midline = np.mean(xticks)
xpos = midline
if smooth:
values = utils.rolling_median(values, window)
if curve == 'GR':
j = 1 # second axis in first track
label_shift[i] = 1
if params['fill_left_cond']:
# do the fill for the lithology track
axs[i][j].fill_betweenx(Z, params['xleft'], values,
facecolor=params['fill_left'],
alpha=1.0, zorder=11)
if (curve == 'DPHI_SAN') and params['fill_left_cond']:
# do the fill for the neutron porosity track
try:
nphi = utils.rolling_median(logs.data['NPHI_SAN'], window)
except ValueError:
Notice.warning("No NPHI in this well")
nphi = np.empty_like(Z)
nphi[:] = np.nan
axs[i][j].fill_betweenx(Z,
nphi,
values,
where=nphi >= values,
facecolor=params['fill_left'],
alpha=1.0,
zorder=11)
axs[i][j].fill_betweenx(Z,
nphi,
values,
where=nphi <= values,
facecolor='#8C1717',
alpha=0.5,
zorder=12)
if curve == 'DRHO':
blk_drho = 3.2
values += blk_drho # this is a hack to get DRHO on RHOB scale
axs[i][j].fill_betweenx(Z,
blk_drho,
values,
where=nphi <= values,
facecolor='#CCCCCC',
alpha=0.5,
zorder=12)
# fill right
if params['fill_right_cond']:
axs[i][j].fill_betweenx(Z, values, params['xright'],
facecolor=params['fill_right'],
alpha=1.0, zorder=12)
# plot curve
axs[i][j].plot(values, Z, color=params['hexcolor'],
lw=lw, zorder=13)
# set scale of curve
axs[i][j].set_xlim([params['xleft'], params['xright']])
# ------------------------------------------------- #
# curve labels
# ------------------------------------------------- #
trans = transforms.blended_transform_factory(axs[i][j].transData,
axs[i][j].transData)
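        # 'magic' is a vertical offset, in depth units, used to stack the
        # curve labels above the top of each track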
magic = -Z[-1] / 12.
axs[i][j].text(xpos, magic - (magic/4)*(label_shift[i]-1),
curve,
horizontalalignment='center',
verticalalignment='bottom',
fontsize=12, color=params['hexcolor'],
transform=trans)
# curve units
units = '${}$'.format(params['units'])
if label_shift[i] <= 1:
axs[i][j].text(xpos, magic*0.5,
units,
horizontalalignment='center',
verticalalignment='top',
fontsize=12, color='k',
transform=trans)
# ------------------------------------------------- #
# scales and tickmarks
# ------------------------------------------------- #
axs[i][j].set_xscale(linOrlog)
axs[i][j].set_ylim([Z[-1], 0])
axs[i][j].axes.xaxis.set_ticks(xticks)
axs[i][j].axes.xaxis.set_ticklabels(sxticks, fontsize=8)
for label in axs[i][j].axes.xaxis.get_ticklabels():
label.set_rotation(90)
axs[i][j].tick_params(axis='x', direction='out')
axs[i][j].xaxis.tick_top()
axs[i][j].xaxis.set_label_position('top')
axs[i][j].xaxis.grid(True, which=whichticks,
linewidth=0.25, linestyle='-',
color='0.75', zorder=100)
axs[i][j].yaxis.grid(True, which=whichticks,
linewidth=0.25, linestyle='-',
color='0.75', zorder=100)
axs[i][j].yaxis.set_ticks(np.arange(0, max(Z), 100))
if i != 0:
axs[i][j].set_yticklabels("")
# ------------------------------------------------- #
# End of curve loop
# ------------------------------------------------- #
# Add Depth label
axs[0][0].text(0, 1.05, 'MD\n$m$', fontsize='10',
horizontalalignment='center',
verticalalignment='center',
transform=axs[0][0].transAxes)
axs[0][0].axes.yaxis.get_ticklabels()
axs[0][0].axes.xaxis.set_ticklabels('')
for label in axs[0][0].axes.yaxis.get_ticklabels():
label.set_rotation(90)
label.set_fontsize(10)
for label in axs[1][0].axes.xaxis.get_ticklabels():
label.set_rotation(90)
label.set_fontsize(10)
# Add Tops
try:
if os.path.exists(tc.tops_file):
tops = utils.get_tops(tc.tops_file)
topx = get_curve_params('DT', fname)
topmidpt = np.amax((topx)['xright'])
# plot tops
for i in range(ntracks):
for mkr, depth in tops.iteritems():
# draw horizontal bars at the top position
axs[i][-1].axhline(y=depth,
xmin=0.01, xmax=.99,
color='b', lw=2,
alpha=0.5,
zorder=100)
# draw text box at the right edge of the last track
axs[-1][-1].text(x=topmidpt, y=depth, s=mkr,
alpha=0.5, color='k',
fontsize='8',
horizontalalignment='center',
verticalalignment='center',
zorder=10000,
bbox=dict(facecolor='white',
edgecolor='k',
alpha=0.25,
lw=0.25),
weight='light')
except AttributeError:
Notice.warning("No tops for this well")
except TypeError:
# We didn't get a tops file so move along.
print "No tops for this well"
return gs
| apache-2.0 |
puolival/multipy | multipy/permutation_test.py | 1 | 2218 | import matplotlib.pyplot as plt
from mne.viz import plot_evoked_topo
import numpy as np
from permutation import permutation_test
from viz import plot_permutation_distribution, plot_permutation_result_1d
import seaborn as sns
"""Test the permutation testing methods on MNE sample data."""
import mne
"""Settings."""
plot_topography = False
"""Load the sample dataset from disk."""
data_path = mne.datasets.sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.Raw(fname, preload=True)
"""Load EOG and ECG projectors."""
eog_proj = mne.read_proj(data_path +
'/MEG/sample/sample_audvis_eog-proj.fif')
ecg_proj = mne.read_proj(data_path +
'/MEG/sample/sample_audvis_ecg-proj.fif')
raw.add_proj(eog_proj)
raw.add_proj(ecg_proj)
"""Apply the projectors."""
raw.apply_proj()
"""Epoch the data."""
events = mne.find_events(raw)
raw.pick_types(meg=True)
lh_aud = mne.Epochs(raw, events, event_id=1, tmin=-0.75, tmax=0.75,
baseline=(-0.75, -0.5))
rh_aud = mne.Epochs(raw, events, event_id=3, tmin=-0.75, tmax=0.75,
baseline=(-0.75, -0.5))
"""Compared evoked responses"""
if (plot_topography):
lh_aud_evoked, rh_aud_evoked = lh_aud.average(), rh_aud.average()
plot_evoked_topo([lh_aud_evoked, rh_aud_evoked],
color=['blue', 'red'])
# Select all epochs and time points from one channel.
ch_ind = [i for i, ch_name in enumerate(raw.info['ch_names'])
if 'MEG 2343'in ch_name][0]
lh_data, rh_data = (lh_aud.get_data()[:, ch_ind, :],
rh_aud.get_data()[:, ch_ind, :])
significant, pvals, cstats, ref_cstat, ref_clusters = permutation_test(lh_data, rh_data)
plot_permutation_distribution(cstats, ref_cstat, show_plot=True)
plot_permutation_result_1d(lh_data, rh_data, significant, lh_aud.times, ref_clusters)
"""Plot trial averages."""
plot_ave = False
if (plot_ave):
sns.set_style('darkgrid')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.mean(lh_data, axis=0))
ax.plot(np.mean(rh_data, axis=0))
ax.set_xlabel('Time (samples)')
ax.set_ylabel('Amplitude')
fig.tight_layout()
plt.show()
| bsd-3-clause |
gclenaghan/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
jjx02230808/project0223 | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
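    Examples
    --------
    A minimal sketch on a 2x2 image (arbitrary pixel values): the graph has
    one node per pixel, so the adjacency matrix is 4x4.
    >>> img = np.array([[0., 1.], [2., 3.]])
    >>> img_to_graph(img).shape
    (4, 4)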
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
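    Examples
    --------
    A minimal sketch for a 2x2 grid of pixels: the adjacency matrix is 4x4,
    with ones on the diagonal and edges between 4-connected neighbours.
    >>> grid_to_graph(2, 2, return_as=np.ndarray).shape
    (4, 4)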
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
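    Examples
    --------
    A small sketch on a 1-D array (values are arbitrary):
    >>> arr = np.arange(10)
    >>> patches = extract_patches(arr, patch_shape=3, extraction_step=1)
    >>> patches.shape
    (8, 3)
    >>> patches[0]
    array([0, 1, 2])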
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
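    Examples
    --------
    A round-trip sketch (arbitrary values): extracting every overlapping
    patch and averaging them back together recovers the original image.
    >>> from sklearn.feature_extraction import image
    >>> one_image = np.arange(16).reshape((4, 4)).astype(np.float64)
    >>> patches = image.extract_patches_2d(one_image, (2, 2))
    >>> reconstructed = image.reconstruct_from_patches_2d(patches, (4, 4))
    >>> np.allclose(one_image, reconstructed)
    True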
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
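    Examples
    --------
    A minimal sketch on a batch of two 4x4 single-channel images
    (values are arbitrary):
    >>> from sklearn.feature_extraction.image import PatchExtractor
    >>> X = np.arange(2 * 4 * 4).reshape((2, 4, 4))
    >>> pe = PatchExtractor(patch_size=(2, 2), max_patches=2, random_state=0)
    >>> pe.transform(X).shape
    (4, 2, 2)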
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
daichi-yoshikawa/dnn | dnnet/neuralnet.py | 1 | 15563 | # Authors: Daichi Yoshikawa <[email protected]>
# License: BSD 3 clause
import os, sys
import matplotlib.pyplot as plt
import pickle
import time
import logging
logger = logging.getLogger('dnnet.log')
import dnnet
from dnnet.exception import DNNetIOError, DNNetRuntimeError
from dnnet.ext_mathlibs import cp, np
from dnnet.utils.nn_utils import prod, shuffle_data, split_data, w2im
from dnnet.utils.nn_utils import is_multi_channels_image, flatten, unflatten
from dnnet.training.back_propagation import BackPropagation
from dnnet.layers.layer import Layer, InputLayer, OutputLayer
class NeuralNetwork:
"""Interface of neural network.
Training of model and prediction with resulting model
is done through this class.
Parameters
----------
layers : np.array of derived class of Layer
Layers to build neural network.
The first layer must be InputLayer and last layer must be OutputLayer.
dtype : type
Data type selected through constructor.
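    Example
    -------
    A minimal usage sketch. ``hidden_layer``, ``optimizer`` and
    ``loss_function`` are placeholders for instances of the corresponding
    dnnet classes, which are defined outside this module::
        model = NeuralNetwork(input_shape=784)
        model.add(hidden_layer)
        model.compile()
        learning_curve = model.fit(
            x, y, optimizer=optimizer, loss_function=loss_function,
            epochs=10, batch_size=100)
        prediction = model.predict(x_new)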
"""
@classmethod
def load(self, name, path=None):
"""Load model from storage.
Arguments
---------
name : str or None, default None
Name of the desired file. Doesn't include path.
path : str or None, default None
Full path to the directory where the desired file is contained.
If None, file is loaded from a directory where script runs.
Returns
-------
NeuralNetwork
Returns model.
"""
if path is None:
path = '.'
if path[0] == '~':
path = os.getenv("HOME") + path[1:]
try:
with open(path + '/' + name, 'rb') as f:
return pickle.load(f)
except IOError as e:
msg = str(e) + '\nNeuralNetwork.load failed.'
raise DNNetIOError(msg)
def __init__(self, input_shape, dtype=np.float32):
"""
Arguments
---------
dtype : type, default np.float32
Data type to use.
"""
self.layers = np.array([], dtype=Layer)
self.dtype = dtype
self.add(InputLayer(input_shape=input_shape))
def add(self, layer):
"""Add instance of derived class of layer.
Build neural network by adding layers one by one with this method.
Arguments
---------
layer : Derived class of Layer
Instance of derived class of Layer.
"""
layer.set_dtype(self.dtype)
self.layers = np.append(self.layers, layer)
def compile(self):
"""Finalize configuration of neural network model.
Warning
-------
This method must be called after adding all required layers
and before starting training.
"""
logger.info('Define network with dnnet of version : %s'\
% dnnet.__version__)
if self.layers.size == 0:
msg = 'NeuralNetwork has no layer.\n Add layers before compiling.'
raise DNNetRuntimeError(msg)
parent = self.layers[0]
self.add(OutputLayer())
for i, layer in enumerate(self.layers, 1):
logger.debug('Add %s layer.' % layer.get_type())
layer.set_parent(parent)
parent = layer
logger.debug('Defined network.')
def fit(self, x, y, optimizer, loss_function, **kwargs):
"""Train model.
Arguments
---------
x : np.array
Descriptive features in 2d array,
whose shape is (num of data, num of feature)
y : np.array
Target features in 2d array,
whose shape is (num of data, num of feature)
optimizer : Derived class of Optimizer
Instance of derived class of Optimizer.
loss_function : Derived class of LossFunction
Used to calculate loss.
epochs : int, default 10
Number of iterations of training.
1 iteration scans all batches one time.
batch_size : int, default 100
Dataset is splitted into multiple mini batches
whose size is this.
learning_curve : bool, default True
Prints out evaluation results of ongoing training.
Also, returns learning curve after completion of training.
shuffle : bool, default True
Shuffle dataset one time before training.
shuffle_per_epoch : bool, default False
Shuffle training data every epoch.
test_data_ratio : float, default 0
Ratio of test data. If 0, all data is used for training.
train_data_ratio_for_eval : float, default 1.0
Ratio of training data to calculate accuracy w.r.t training data.
Returns
-------
LearningCurve
Instance of LearningCurve, which contains
losses and accuracies for train and test data.
Warning
-------
This method assumes that x and y include all data you use.
If your data set is so large that all data cannot be stored in memory,
you cannot use this method. Use fit_genenerator instead.
"""
start = time.time()
epochs = kwargs.pop('epochs', 10)
batch_size = kwargs.pop('batch_size', 100)
learning_curve = kwargs.pop('learning_curve', True)
shuffle = kwargs.pop('shuffle', True)
shuffle_per_epoch = kwargs.pop('shuffle_per_epoch', False)
test_data_ratio = kwargs.pop('test_data_ratio', self.dtype(0.))
train_data_ratio_for_eval = kwargs.pop(
'train_data_ratio_for_eval', 1.0)
logger.info('\n--- Parameters ---\nepochs: %d\nbatch_size: %d\n'
'learning_curve: %r\nshuffle: %r\nshuffle_per_epoch: %r\n'
                    'test_data_ratio: %f\ntrain_data_ratio_for_eval: %f\n'
'optimizer: %s\nloss_function: %s'
% (epochs, batch_size, learning_curve, shuffle,
shuffle_per_epoch, test_data_ratio,
train_data_ratio_for_eval, optimizer.get_type(),
loss_function.get_type()))
if shuffle:
logger.debug('shuffle data.')
x, y = shuffle_data(x, y)
x, y = self.__convert_dtype(x, y)
x_train, y_train, x_test, y_test = split_data(x, y, test_data_ratio)
logger.info('Train data input, output : %s, %s'
% (x_train.shape, y_train.shape))
logger.info('Test data input, output : %s, %s'
% (x_test.shape, y_test.shape))
back_prop = BackPropagation(
epochs, batch_size, optimizer, loss_function,
learning_curve, self.dtype)
np_err_config = np.seterr('raise')
try:
logger.info('Fitting model starts.')
lc = back_prop.fit(
self.layers, x_train, y_train, x_test, y_test,
shuffle_per_epoch, batch_size, train_data_ratio_for_eval)
except FloatingPointError as e:
msg = str(e) + '\nOverflow or underflow occurred. '\
+ 'Retry with smaller learning_rate or '\
+ 'larger weight_decay for Optimizer.'
raise DNNetRuntimeError(msg)
except Exception as e:
raise DNNetRuntimeError(e)
finally:
np.seterr(
divide=np_err_config['divide'],
over=np_err_config['over'],
under=np_err_config['under'],
invalid=np_err_config['invalid']
)
end = time.time()
logger.info('Fitting model is done. '
'Processing time : %.2f[s]\n' % (end - start))
return lc
def fit_generator(self, x, y, optimizer, loss_function, **kwargs):
"""Train model for large size data set by using generator.
TODO(
"""
raise NotImplementError('NeuralNetwork.fit_one_batch')
def predict(self, x):
"""Returns predicted result.
Arguments
---------
x : np.array
            Descriptive features in 2d array,
whose shape is (num of data, num of features)
Returns
-------
np.array
Predicted target features in 2d array,
whose shape is (num of data, num of features)
"""
return self.layers[0].predict(x.astype(self.dtype))
def get_config_str(self):
config_str = ''
for i, layer in enumerate(self.layers):
config_str += layer.get_config_str() + '\n'
config_str = config_str.rstrip('\n')
return config_str
def save(self, name, path=None):
"""Save model to storage.
Arguments
---------
        name : str
            Name of the resulting file. Doesn't include path.
path : str or None, default None
Full path to the directory where the resulting file is generated.
If None, file is saved in a directory where script runs.
Returns
-------
bool
Returns true when succeeded.
"""
if path is None:
path = '.'
if path[0] == '~':
path = os.getenv("HOME") + path[1:]
        try:
            with open(os.path.join(path, name), 'wb') as f:
                pickle.dump(self, f)
            return True
        except IOError as e:
            msg = str(e) + '\nNeuralNetwork.save failed.'
            raise DNNetIOError(msg)
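    # save() above serialises the whole network with pickle, so a saved
    # model can be restored with the standard library alone. A minimal
    # sketch, assuming 'model.pkl' was produced by NeuralNetwork.save:
    #
    #     import pickle
    #     with open('model.pkl', 'rb') as f:
    #         model = pickle.load(f)
    #     y_pred = model.predict(x_new)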
def visualize_filters(
self, index, n_rows, n_cols, filter_shape, figsize=(8, 8)):
"""Visualize filters.
Weight matrix in affine layer or convolution layer
can be shown as image.
If weight matrix is so big that all filters cannot be displayed,
displayed filters are randomly selected.
Arguments
---------
index : int
index-th affine/convolution layer's weight matrix is visualized.
This index starts from 0, that is,
the first layer with weight matrix is 0-th.
If this value is out of range, raise DNNetRuntimeError.
        n_rows, n_cols : int
            Number of filters to display
            in the direction of rows and cols respectively.
        filter_shape : tuple (rows, cols)
            Shape of a single filter. In the case of multi-channel,
            filters are taken as single channel by taking average over channels.
        figsize : tuple (width, height), default (8, 8)
            Size of the resulting matplotlib figure.
"""
# Get index of layer which is index-th layer with weight matrix.
n_layers_w_filter = 0
tgt_layer_idx = None
tgt_layer_type = None
for i, layer in enumerate(self.layers, 0):
if layer.has_weight():
if n_layers_w_filter == index:
tgt_layer_idx = i
tgt_layer_type = layer.get_type()
break
n_layers_w_filter += 1
if tgt_layer_idx is None:
msg = str(index) + '-th layer with weight matrix doesn\'t exist.'
raise DNNetRuntimeError(msg)
if tgt_layer_type == 'convolution':
self.visualize_filter_of_convolution_layer(
self.layers[tgt_layer_idx], n_rows, n_cols, filter_shape, figsize)
elif tgt_layer_type == 'affine':
self.visualize_filter_of_affine_layer(
self.layers[tgt_layer_idx], n_rows, n_cols, filter_shape, figsize)
else:
msg = 'NeuralNetwork.visualize_filters does not support '\
+ '%s' % tgt_layer_type
raise DNNetRuntimeError(msg)
print(tgt_layer_idx, tgt_layer_type)
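    # A minimal call sketch, assuming the first layer with weights holds
    # 5x5 filters and a 4x4 grid is wanted:
    #
    #     model.visualize_filters(index=0, n_rows=4, n_cols=4,
    #                             filter_shape=(5, 5))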
def visualize_filter_of_convolution_layer(
self, layer, n_rows, n_cols, filter_shape, figsize=(8, 8)):
n_filters = layer.w.shape[1]
if n_filters < n_rows * n_cols:
msg = 'n_rows and n_cols is too big.\n'\
+ 'n_filters : %d\n' % n_filters\
+ 'n_rows : %d\n' % n_rows\
+ 'n_cols : %d\n' % n_cols
raise DNNetRuntimeError(msg)
        w = layer.w[1:, :n_rows*n_cols]
        filters = w.T.reshape(-1, filter_shape[0], filter_shape[1])
        plt.figure(figsize=figsize)
        # Show each filter in its own cell of an n_rows x n_cols grid;
        # imshow cannot render the stacked 3d array directly.
        for i, f in enumerate(filters, 1):
            plt.subplot(n_rows, n_cols, i)
            plt.imshow(f)
            plt.axis('off')
        plt.show()
def visualize_filter_of_affine_layer(
self, layer, n_rows, n_cols, filter_shape, figsize=(8, 8)):
"""Visualize filters.
Weight matrix in affine layer or convolution layer
can be shown as image.
If weight matrix is so big that all filters cannot be displayed,
displayed filters are randomly selected.
Arguments
---------
        layer : Layer
            Affine layer whose weight matrix is visualized.
        n_rows, n_cols : int
            Number of filters to display
            in the direction of rows and cols respectively.
        filter_shape : tuple (rows, cols)
            Shape of a single filter; each weight column is reshaped to this.
        figsize : tuple (width, height), default (8, 8)
            Size of the resulting matplotlib figure.
"""
w = layer.w
        if (w.shape[0] - 1) != np.prod(filter_shape):
            msg = '(w.shape[0] - 1) != np.prod(filter_shape)\n'\
                + 'w.shape[0] : %d\n' % w.shape[0]\
                + 'np.prod(filter_shape) : %d' % np.prod(filter_shape)
            raise DNNetRuntimeError(msg)
#if w.shape[1] < prod(layout):
#img = w2im(self.layers[tgt_index].w, shape, layout)
#plt.figure(figsize=figsize)
#plt.imshow(img)
#plt.show()
def show_filters(self, index, shape, layout, figsize=(8, 8)):
"""Visualize filters.
Weight matrix in affine layer or convolution layer
can be shown as image.
If weight matrix is so big that all filters cannot be displayed,
displayed filters are randomly selected.
Arguments
---------
index : int
index-th affine/convolution layer's weight matrix is visualized.
This index starts from 0, that is,
the first layer with weight matrix is 0-th.
            If this value is out of range, raise DNNetRuntimeError.
shape : tuple (rows, cols)
Shape of filter. In the case of multi-channel, filters are
taken as single channel by taking average over channels.
layout : tuple (rows, cols)
Number of filter to display
in direction of rows and cols respectively.
"""
# Get index of layer which is index-th layer with weight matrix.
num_of_layer_with_filter = 0
tgt_index = None
for i, layer in enumerate(self.layers, 0):
if layer.has_weight():
if num_of_layer_with_filter == index:
tgt_index = i
break
num_of_layer_with_filter += 1
if tgt_index is None:
msg = str(index) + '-th layer with weight matrix doesn\'t exist.'
raise DNNetRuntimeError(msg)
img = w2im(self.layers[tgt_index].w, shape, layout)
plt.figure(figsize=figsize)
plt.imshow(img)
plt.show()
def __convert_dtype(self, x, y):
"""Convert data type of features into selected one in constructor."""
return x.astype(self.dtype), y.astype(self.dtype)
| bsd-3-clause |
rupakc/Kaggle-Compendium | Otto Group Product Classification Challenge/otto-baseline.py | 1 | 2583 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import train_test_split
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
otto_frame = pd.read_csv(filename)
otto_frame.dropna(inplace=True)
del otto_frame['id']
class_labels = list(otto_frame['target'].values)
del otto_frame['target']
X_train,X_test,y_train,y_test = train_test_split(otto_frame.values,class_labels,test_size=0.2,random_state=42)
classifier_list, classifier_name_list = get_ensemble_models()
#classifier_list, classifier_name_list = get_naive_bayes_models()
#classifier_list, classifier_name_list = get_neural_network()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
| mit |
mmoiozo/IROS | sw/airborne/test/stabilization/compare_ref_quat.py | 48 | 1123 | #! /usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from ref_quat_float import RefQuatFloat
from ref_quat_int import RefQuatInt
steps = 512 * 2
ref_float_res = np.zeros((steps, 3))
ref_int_res = np.zeros((steps, 3))
ref_float = RefQuatFloat()
ref_int = RefQuatInt()
q_sp = np.array([0.92387956, 0.38268346, 0., 0.])
ref_float.setpoint = q_sp
ref_int.setpoint = q_sp
#print(ref_int.setpoint)
dt = 1/512
for i in range(0, steps):
ref_float.update(dt)
ref_float_res[i, :] = ref_float.eulers
ref_int.update(dt)
ref_int_res[i, :] = ref_int.eulers
plt.figure(1)
plt.subplot(311)
plt.title("reference in euler angles")
plt.plot(np.degrees(ref_float_res[:, 0]), 'g')
plt.plot(np.degrees(ref_int_res[:, 0]), 'r')
plt.ylabel("phi [deg]")
plt.subplot(312)
plt.plot(np.degrees(ref_float_res[:, 1]), 'g')
plt.plot(np.degrees(ref_int_res[:, 1]), 'r')
plt.ylabel("theta [deg]")
plt.subplot(313)
plt.plot(np.degrees(ref_float_res[:, 2]), 'g')
plt.plot(np.degrees(ref_int_res[:, 2]), 'r')
plt.ylabel("psi [deg]")
plt.show()
| gpl-2.0 |
Fireblend/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
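# --- Hedged usage sketch (not part of the original module) ---
# transform() projects new rows onto the fitted components and
# inverse_transform() maps them back to the original feature space.
# A minimal round trip, assuming scikit-learn and numpy are installed:
#
#     import numpy as np
#     from sklearn.decomposition import TruncatedSVD
#     X = np.random.RandomState(0).rand(20, 10)
#     svd = TruncatedSVD(n_components=3, random_state=0).fit(X)
#     X_reduced = svd.transform(X)                  # shape (20, 3)
#     X_approx = svd.inverse_transform(X_reduced)   # shape (20, 10)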
| bsd-3-clause |
jpo/healthcareai-py | healthcareai/tests/test_predict.py | 4 | 1135 | import unittest
from sklearn.linear_model import LinearRegression
from healthcareai.common.healthcareai_error import HealthcareAIError
from healthcareai.common.predict import validate_estimator
class TestPredictValidation(unittest.TestCase):
def test_predict_validation_should_raise_error_on_non_estimator(self):
self.assertRaises(HealthcareAIError, validate_estimator, 'foo')
def test_predict_validation_error_message_on_non_estimator(self):
non_estimator_junk_data = 'foo'
try:
validate_estimator(non_estimator_junk_data)
# Fail the test if no error is raised
self.fail()
except HealthcareAIError as e:
expected_message = 'Predictions require an estimator. You passed in foo, which is of type: {}'.format(
type(non_estimator_junk_data))
self.assertEqual(expected_message, e.message)
def test_predict_validation_should_be_true_with_instance_of_scikit_estimator(self):
estimator = LinearRegression()
self.assertTrue(validate_estimator(estimator))
if __name__ == '__main__':
unittest.main()
| mit |
feranick/SpectralMachine | Archive/SpectraLearnPredict2/SpectraLearnPredict2/slp/slp_kmeans.py | 1 | 3378 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* SpectraLearnPredict2 - kmeans
* Perform Machine Learning on Spectroscopy Data.
*
* Uses: Deep Neural Networks, TensorFlow, SVM, PCA, K-Means
*
* By: Nicola Ferralis <[email protected]>
*
***********************************************************
'''
import matplotlib
if matplotlib.get_backend() == 'TkAgg':
matplotlib.use('Agg')
import numpy as np
import sys, os.path, getopt, glob, csv
import random, time, configparser, os
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
from .slp_config import *
#********************
''' Run K-Means '''
#********************
def runKMmain(A, Cl, En, R, Aorig, Rorig):
from sklearn.cluster import KMeans
print('==========================================================================\n')
print(' Running K-Means...')
print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
if kmDef.customNumKMComp == False:
numKMcomp = np.unique(Cl).shape[0]
else:
numKMcomp = kmDef.numKMcomponents
kmeans = KMeans(n_clusters=numKMcomp, random_state=0).fit(A)
prediction = kmeans.predict(R)[0]
print('\n ==============================')
print(' \033[1mK-Means\033[0m - Prediction')
print(' ==============================')
print(' Class\t| Value')
for j in range(0,kmeans.labels_.shape[0]):
if kmeans.labels_[j] == prediction:
print(" {0:d}\t| {1:.2f}".format(prediction,Cl[j]))
print(' ==============================\n')
if kmDef.plotKM:
import matplotlib.pyplot as plt
for j in range(0,kmeans.labels_.shape[0]):
if kmeans.labels_[j] == kmeans.predict(R)[0]:
plt.plot(En, Aorig[j,:])
plt.plot(En, Rorig[0,:], linewidth = 2, label='Predict')
plt.title('K-Means')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Intensity')
plt.legend()
plt.show()
return kmeans.predict(R)[0]
#**********************************************
''' K-Means - Maps'''
#**********************************************
def KmMap(mapFile, numKMcomp):
''' Open prediction map '''
X, Y, R, Rx = readPredMap(mapFile)
type = 0
    i = 0
R, Rx, Rorig = preProcessNormMap(R, Rx, type)
from sklearn.cluster import KMeans
print(' Running K-Means...')
print(' Number of classes: ' + str(numKMcomp))
kmeans = KMeans(n_clusters=kmDef.numKMcomponents, random_state=0).fit(R)
kmPred = np.empty([R.shape[0]])
for i in range(0, R.shape[0]):
kmPred[i] = kmeans.predict(R[i,:].reshape(1,-1))[0]
saveMap(mapFile, 'KM', 'Class', int(kmPred[i]), X[i], Y[i], True)
if kmPred[i] in kmeans.labels_:
if os.path.isfile(saveMapName(mapFile, 'KM', 'Class_'+ str(int(kmPred[i]))+'-'+str(np.unique(kmeans.labels_).shape[0]), False)) == False:
saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, Rx)), ' ', ' ', False)
saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, R[1,:])), X[i], Y[i], False)
if kmDef.plotKM:
plotMaps(X, Y, kmPred, 'K-Means')
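# --- Hedged standalone sketch (not part of the original module) ---
# The core pattern behind runKMmain/KmMap above is plain scikit-learn
# KMeans: fit on training spectra, then predict the cluster of a new
# spectrum. The random arrays below are placeholders for real spectra.
if __name__ == '__main__':
    from sklearn.cluster import KMeans
    import numpy as np
    rng = np.random.RandomState(0)
    A_demo = rng.rand(20, 100)   # 20 training spectra, 100 energy points
    R_demo = rng.rand(1, 100)    # one spectrum to classify
    km_demo = KMeans(n_clusters=3, random_state=0).fit(A_demo)
    print(' Predicted cluster: %d' % km_demo.predict(R_demo)[0])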
| gpl-3.0 |
frank-tancf/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12303 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
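# --- Hedged usage sketch (not part of the original module) ---
# For data that does not fit in memory, partial_fit() above can be called
# repeatedly on mini-batches instead of fit(). A minimal sketch with
# synthetic binary data:
#
#     import numpy as np
#     from sklearn.neural_network import BernoulliRBM
#     rng = np.random.RandomState(0)
#     rbm = BernoulliRBM(n_components=16, batch_size=10, random_state=0)
#     for _ in range(100):
#         batch = (rng.rand(10, 64) > 0.5).astype(np.float64)
#         rbm.partial_fit(batch)
#     hidden = rbm.transform(batch)   # shape (10, 16)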
| bsd-3-clause |
fuxiang90/speed-prediction | script/lr.py | 1 | 2753 | # coding=utf-8
#! /usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import mlpy
import sys
import math
def get_input(filename):
fin = open(filename)
x = []
y = []
for each in fin:
each = each[:each.find('\n')]
l = each.split(' ')
each_x = []
each_x.append(1)
each_x .append(float(l[0]))
each_x .append(float(l[1]))
each_x .append(float(l[2]))
each_x .append(float(l[3]))
x.append(each_x)
y.append(float(l[4]))
return x ,y
def mlpy_linear_regressiono(x ,y):
beta ,rank = mlpy.ols_base(x, y,0.05)
return beta
def split(filename):
f_train = open('train','w')
f_test = open('test','w')
fin = open(filename)
pos = 0
for each in fin:
if pos % 8 ==0:
f_test.write(each)
else:
f_train.write(each)
pos = pos + 1
fin.close()
f_train.close()
f_test.close()
def ridge_main():
x,y = get_input('train')
ridge = mlpy.Ridge()
ridge.learn(x, y)
test_x ,test_y = get_input('test')
right_num = 0
mae = 0.0
for pos in range(0,len(test_x)):
yy = ridge.pred(test_x[pos])
#print yy ,test_y[pos]
if abs(yy - test_y[pos]) < 5.0 :
right_num +=1
mae += math.sqrt( (yy-test_y[pos]) ** 2 )
print right_num*1.0 / len(test_x), mae/len(test_x)
def lars_base_main():
x,y = get_input('train')
lars = mlpy.LARS()
lars.learn(x, y)
test_x ,test_y = get_input('test')
right_num = 0
for pos in range(0,len(test_x)):
yy = lars.pred(test_x[pos])
#print yy ,test_y[pos]
if abs(yy - test_y[pos]) < 5.0 :
right_num +=1
print right_num*1.0 / len(test_x)
def main():
x,y = get_input('train')
beta = mlpy_linear_regressiono(x,y)
test_x ,test_y = get_input('test')
right_num = 0
mae = 0.0
for pos in range(0,len(test_x)):
pre_y = test_x[pos][0] * beta[0] + test_x[pos][1] * beta[1] + test_x[pos][2] * beta[2] + test_x[pos][3] * beta[3]
        #print pre_y , test_y[pos]
        if abs(pre_y - test_y[pos]) < 5.0:
            right_num += 1
        mae += math.fabs(pre_y - test_y[pos])
print right_num*1.0 / len(test_x) ,mae/len(test_x)
def test_main():
files = ['2111_data' , '22520_data' ,'3860_data','390_data','620_data']
for file_name in files:
name = file_name[:file_name.find('_')]
print "roadid :" , name
split(file_name)
#ridge_main()
main()
if __name__ =='__main__':
#split('2111_data')
#ridge_main()
test_main()
#main()
#lars_base_main()
| bsd-3-clause |
yebrahim/pydatalab | legacy_tests/bigquery/table_tests.py | 4 | 32582 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import object
import calendar
import datetime as dt
import mock
import pandas
import unittest
import google.auth
import datalab.bigquery
import datalab.context
import datalab.utils
class TestCases(unittest.TestCase):
def _check_name_parts(self, table):
parsed_name = table._name_parts
self.assertEqual('test', parsed_name[0])
self.assertEqual('requestlogs', parsed_name[1])
self.assertEqual('today', parsed_name[2])
self.assertEqual('', parsed_name[3])
self.assertEqual('[test:requestlogs.today]', table._repr_sql_())
self.assertEqual('test:requestlogs.today', str(table))
def test_api_paths(self):
name = datalab.bigquery._utils.TableName('a', 'b', 'c', 'd')
self.assertEqual('/projects/a/datasets/b/tables/cd',
datalab.bigquery._api.Api._TABLES_PATH % name)
self.assertEqual('/projects/a/datasets/b/tables/cd/data',
datalab.bigquery._api.Api._TABLEDATA_PATH % name)
name = datalab.bigquery._utils.DatasetName('a', 'b')
self.assertEqual('/projects/a/datasets/b', datalab.bigquery._api.Api._DATASETS_PATH % name)
def test_parse_full_name(self):
table = TestCases._create_table('test:requestlogs.today')
self._check_name_parts(table)
def test_parse_local_name(self):
table = TestCases._create_table('requestlogs.today')
self._check_name_parts(table)
def test_parse_dict_full_name(self):
table = TestCases._create_table({'project_id': 'test', 'dataset_id': 'requestlogs',
'table_id': 'today'})
self._check_name_parts(table)
def test_parse_dict_local_name(self):
table = TestCases._create_table({'dataset_id': 'requestlogs', 'table_id': 'today'})
self._check_name_parts(table)
def test_parse_named_tuple_name(self):
table = TestCases._create_table(datalab.bigquery._utils.TableName('test',
'requestlogs', 'today', ''))
self._check_name_parts(table)
def test_parse_tuple_full_name(self):
table = TestCases._create_table(('test', 'requestlogs', 'today'))
self._check_name_parts(table)
def test_parse_tuple_local(self):
table = TestCases._create_table(('requestlogs', 'today'))
self._check_name_parts(table)
def test_parse_array_full_name(self):
table = TestCases._create_table(['test', 'requestlogs', 'today'])
self._check_name_parts(table)
def test_parse_array_local(self):
table = TestCases._create_table(['requestlogs', 'today'])
self._check_name_parts(table)
def test_parse_invalid_name(self):
with self.assertRaises(Exception):
TestCases._create_table('today@')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_metadata(self, mock_api_tables_get):
name = 'test:requestlogs.today'
ts = dt.datetime.utcnow()
mock_api_tables_get.return_value = TestCases._create_table_info_result(ts=ts)
t = TestCases._create_table(name)
metadata = t.metadata
self.assertEqual('Logs', metadata.friendly_name)
self.assertEqual(2, metadata.rows)
self.assertEqual(2, metadata.rows)
self.assertTrue(abs((metadata.created_on - ts).total_seconds()) <= 1)
self.assertEqual(None, metadata.expires_on)
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_schema(self, mock_api_tables):
mock_api_tables.return_value = TestCases._create_table_info_result()
t = TestCases._create_table('test:requestlogs.today')
schema = t.schema
self.assertEqual(2, len(schema))
self.assertEqual('name', schema[0].name)
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_schema_nested(self, mock_api_tables):
mock_api_tables.return_value = TestCases._create_table_info_nested_schema_result()
t = TestCases._create_table('test:requestlogs.today')
schema = t.schema
self.assertEqual(4, len(schema))
self.assertEqual('name', schema[0].name)
self.assertEqual('val', schema[1].name)
self.assertEqual('more', schema[2].name)
self.assertEqual('more.xyz', schema[3].name)
self.assertIsNone(schema['value'])
self.assertIsNotNone(schema['val'])
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_malformed_response_raises_exception(self, mock_api_tables_get):
mock_api_tables_get.return_value = {}
t = TestCases._create_table('test:requestlogs.today')
with self.assertRaises(Exception) as error:
t.schema
self.assertEqual('Unexpected table response: missing schema', str(error.exception))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_dataset_list(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
tables = []
for table in ds:
tables.append(table)
self.assertEqual(2, len(tables))
self.assertEqual('test:testds.testTable1', str(tables[0]))
self.assertEqual('test:testds.testTable2', str(tables[1]))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_table_list(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
tables = []
for table in ds.tables():
tables.append(table)
self.assertEqual(2, len(tables))
self.assertEqual('test:testds.testTable1', str(tables[0]))
self.assertEqual('test:testds.testTable2', str(tables[1]))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_view_list(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
views = []
for view in ds.views():
views.append(view)
self.assertEqual(1, len(views))
self.assertEqual('test:testds.testView1', str(views[0]))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_table_list_empty(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_empty_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
tables = []
for table in ds:
tables.append(table)
self.assertEqual(0, len(tables))
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_exists(self, mock_api_tables_get):
mock_api_tables_get.return_value = None
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
self.assertTrue(tbl.exists())
mock_api_tables_get.side_effect = datalab.utils.RequestException(404, 'failed')
self.assertFalse(tbl.exists())
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_tables_create(self,
mock_api_datasets_get,
mock_api_tables_list,
mock_api_tables_insert):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = []
schema = TestCases._create_inferred_schema()
mock_api_tables_insert.return_value = {}
with self.assertRaises(Exception) as error:
TestCases._create_table_with_schema(schema)
self.assertEqual('Table test:testds.testTable0 could not be created as it already exists',
str(error.exception))
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
self.assertIsNotNone(TestCases._create_table_with_schema(schema), 'Expected a table')
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_insert_data_no_table(self,
mock_api_datasets_get,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_time_sleep,
mock_uuid):
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.side_effect = datalab.utils.RequestException(404, 'failed')
mock_api_tabledata_insert_all.return_value = {}
mock_api_datasets_get.return_value = None
table = TestCases._create_table_with_schema(TestCases._create_inferred_schema())
df = TestCases._create_data_frame()
with self.assertRaises(Exception) as error:
table.insert_data(df)
self.assertEqual('Table %s does not exist.' % str(table), str(error.exception))
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_missing_field(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep,
mock_uuid,):
# Truncate the schema used when creating the table so we have an unmatched column in insert.
schema = TestCases._create_inferred_schema()[:2]
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = None
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_list.return_value = []
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
df = TestCases._create_data_frame()
with self.assertRaises(Exception) as error:
table.insert_data(df)
self.assertEqual('Table does not contain field headers', str(error.exception))
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_insert_data_mismatched_schema(self,
mock_api_datasets_get,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_time_sleep,
mock_uuid):
# Change the schema used when creating the table so we get a mismatch when inserting.
schema = TestCases._create_inferred_schema()
schema[2]['type'] = 'STRING'
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
mock_api_datasets_get.return_value = None
table = TestCases._create_table_with_schema(schema)
df = TestCases._create_data_frame()
with self.assertRaises(Exception) as error:
table.insert_data(df)
self.assertEqual('Field headers in data has type FLOAT but in table has type STRING',
str(error.exception))
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dataframe(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema()
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
df = TestCases._create_data_frame()
result = table.insert_data(df)
self.assertIsNotNone(result, "insert_all should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3}}
])
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dictlist(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema()
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
result = table.insert_data([
{u'column': 'r0', u'headers': 10.0, u'some': 0},
{u'column': 'r1', u'headers': 10.0, u'some': 1},
{u'column': 'r2', u'headers': 10.0, u'some': 2},
{u'column': 'r3', u'headers': 10.0, u'some': 3}
])
self.assertIsNotNone(result, "insert_all should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3}}
])
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dictlist_index(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema('Index')
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
result = table.insert_data([
{u'column': 'r0', u'headers': 10.0, u'some': 0},
{u'column': 'r1', u'headers': 10.0, u'some': 1},
{u'column': 'r2', u'headers': 10.0, u'some': 2},
{u'column': 'r3', u'headers': 10.0, u'some': 3}
], include_index=True)
self.assertIsNotNone(result, "insert_all should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0, 'Index': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1, 'Index': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2, 'Index': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3, 'Index': 3}}
])
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dictlist_named_index(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema('Row')
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
result = table.insert_data([
{u'column': 'r0', u'headers': 10.0, u'some': 0},
{u'column': 'r1', u'headers': 10.0, u'some': 1},
{u'column': 'r2', u'headers': 10.0, u'some': 2},
{u'column': 'r3', u'headers': 10.0, u'some': 3}
], include_index=True, index_name='Row')
self.assertIsNotNone(result, "insert_all should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0, 'Row': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1, 'Row': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2, 'Row': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3, 'Row': 3}}
])
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.jobs_insert_load')
@mock.patch('datalab.bigquery._api.Api.jobs_get')
def test_table_load(self, mock_api_jobs_get, mock_api_jobs_insert_load, mock_api_tables_get):
schema = TestCases._create_inferred_schema('Row')
mock_api_jobs_get.return_value = {'status': {'state': 'DONE'}}
mock_api_jobs_insert_load.return_value = None
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
job = tbl.load('gs://foo')
self.assertIsNone(job)
mock_api_jobs_insert_load.return_value = {'jobReference': {'jobId': 'bar'}}
job = tbl.load('gs://foo')
self.assertEquals('bar', job.id)
@mock.patch('datalab.bigquery._api.Api.table_extract')
@mock.patch('datalab.bigquery._api.Api.jobs_get')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_extract(self, mock_api_tables_get, mock_api_jobs_get, mock_api_table_extract):
mock_api_tables_get.return_value = {}
mock_api_jobs_get.return_value = {'status': {'state': 'DONE'}}
mock_api_table_extract.return_value = None
tbl = datalab.bigquery.Table('testds.testTable0', context=self._create_context())
job = tbl.extract('gs://foo')
self.assertIsNone(job)
mock_api_table_extract.return_value = {'jobReference': {'jobId': 'bar'}}
job = tbl.extract('gs://foo')
self.assertEquals('bar', job.id)
@mock.patch('datalab.bigquery._api.Api.tabledata_list')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_to_dataframe(self, mock_api_tables_get, mock_api_tabledata_list):
schema = self._create_inferred_schema()
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_list.return_value = {
'rows': [
{'f': [{'v': 1}, {'v': 'foo'}, {'v': 3.1415}]},
{'f': [{'v': 2}, {'v': 'bar'}, {'v': 0.5}]},
]
}
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
df = tbl.to_dataframe()
self.assertEquals(2, len(df))
self.assertEquals(1, df['some'][0])
self.assertEquals(2, df['some'][1])
self.assertEquals('foo', df['column'][0])
self.assertEquals('bar', df['column'][1])
self.assertEquals(3.1415, df['headers'][0])
self.assertEquals(0.5, df['headers'][1])
def test_encode_dict_as_row(self):
when = dt.datetime(2001, 2, 3, 4, 5, 6, 7)
row = datalab.bigquery.Table._encode_dict_as_row({'fo@o': 'b@r', 'b+ar': when}, {})
self.assertEqual({'foo': 'b@r', 'bar': '2001-02-03T04:05:06.000007'}, row)
def test_decorators(self):
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
tbl2 = tbl.snapshot(dt.timedelta(hours=-1))
self.assertEquals('test:testds.testTable0@-3600000', str(tbl2))
with self.assertRaises(Exception) as error:
tbl2 = tbl2.snapshot(dt.timedelta(hours=-2))
self.assertEqual('Cannot use snapshot() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl2.window(dt.timedelta(hours=-2), 0)
self.assertEqual('Cannot use window() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl.snapshot(dt.timedelta(days=-8))
self.assertEqual(
'Invalid snapshot relative when argument: must be within 7 days: -8 days, 0:00:00',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl.snapshot(dt.timedelta(days=-8))
self.assertEqual(
'Invalid snapshot relative when argument: must be within 7 days: -8 days, 0:00:00',
str(error.exception))
tbl2 = tbl.snapshot(dt.timedelta(days=-1))
self.assertEquals('test:testds.testTable0@-86400000', str(tbl2))
with self.assertRaises(Exception) as error:
tbl.snapshot(dt.timedelta(days=1))
self.assertEqual('Invalid snapshot relative when argument: 1 day, 0:00:00',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl2 = tbl.snapshot(1000)
self.assertEqual('Invalid snapshot when argument type: 1000',
str(error.exception))
self.assertEquals('test:testds.testTable0@-86400000', str(tbl2))
when = dt.datetime.utcnow() + dt.timedelta(1)
with self.assertRaises(Exception) as error:
tbl.snapshot(when)
self.assertEqual('Invalid snapshot absolute when argument: %s' % when,
str(error.exception))
when = dt.datetime.utcnow() - dt.timedelta(8)
with self.assertRaises(Exception) as error:
tbl.snapshot(when)
self.assertEqual('Invalid snapshot absolute when argument: %s' % when,
str(error.exception))
def test_window_decorators(self):
# The at test above already tests many of the conversion cases. The extra things we
# have to test are that we can use two values, we get a meaningful default for the second
# if we pass None, and that the first time comes before the second.
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
tbl2 = tbl.window(dt.timedelta(hours=-1))
self.assertEquals('test:testds.testTable0@-3600000-0', str(tbl2))
with self.assertRaises(Exception) as error:
tbl2 = tbl2.window(-400000, 0)
self.assertEqual('Cannot use window() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl2.snapshot(-400000)
self.assertEqual('Cannot use snapshot() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl.window(dt.timedelta(0), dt.timedelta(hours=-1))
self.assertEqual(
'window: Between arguments: begin must be before end: 0:00:00, -1 day, 23:00:00',
str(error.exception))
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.table_update')
def test_table_update(self, mock_api_table_update, mock_api_tables_get):
schema = self._create_inferred_schema()
info = {'schema': {'fields': schema}, 'friendlyName': 'casper',
'description': 'ghostly logs',
'expirationTime': calendar.timegm(dt.datetime(2020, 1, 1).utctimetuple()) * 1000}
mock_api_tables_get.return_value = info
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
new_name = 'aziraphale'
new_description = 'demon duties'
new_schema = [{'name': 'injected', 'type': 'FLOAT'}]
new_schema.extend(schema)
new_expiry = dt.datetime(2030, 1, 1)
tbl.update(new_name, new_description, new_expiry, new_schema)
name, info = mock_api_table_update.call_args[0]
self.assertEqual(tbl.name, name)
self.assertEqual(new_name, tbl.metadata.friendly_name)
self.assertEqual(new_description, tbl.metadata.description)
self.assertEqual(new_expiry, tbl.metadata.expires_on)
self.assertEqual(len(new_schema), len(tbl.schema))
def test_table_to_query(self):
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
q = tbl.to_query()
self.assertEqual('SELECT * FROM [test:testds.testTable0]', q.sql)
q = tbl.to_query('foo, bar')
self.assertEqual('SELECT foo, bar FROM [test:testds.testTable0]', q.sql)
q = tbl.to_query(['bar', 'foo'])
self.assertEqual('SELECT bar,foo FROM [test:testds.testTable0]', q.sql)
@staticmethod
def _create_context():
project_id = 'test'
creds = mock.Mock(spec=google.auth.credentials.Credentials)
return datalab.context.Context(project_id, creds)
@staticmethod
def _create_table(name):
return datalab.bigquery.Table(name, TestCases._create_context())
@staticmethod
def _create_table_info_result(ts=None):
if ts is None:
ts = dt.datetime.utcnow()
epoch = dt.datetime.utcfromtimestamp(0)
timestamp = (ts - epoch).total_seconds() * 1000
return {
'description': 'Daily Logs Table',
'friendlyName': 'Logs',
'numBytes': 1000,
'numRows': 2,
'creationTime': timestamp,
'lastModifiedTime': timestamp,
'schema': {
'fields': [
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'val', 'type': 'INTEGER', 'mode': 'NULLABLE'}
]
}
}
@staticmethod
def _create_table_info_nested_schema_result(ts=None):
if ts is None:
ts = dt.datetime.utcnow()
epoch = dt.datetime.utcfromtimestamp(0)
timestamp = (ts - epoch).total_seconds() * 1000
return {
'description': 'Daily Logs Table',
'friendlyName': 'Logs',
'numBytes': 1000,
'numRows': 2,
'creationTime': timestamp,
'lastModifiedTime': timestamp,
'schema': {
'fields': [
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'val', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'more', 'type': 'RECORD', 'mode': 'REPEATED',
'fields': [
{'name': 'xyz', 'type': 'INTEGER', 'mode': 'NULLABLE'}
]
}
]
}
}
@staticmethod
def _create_dataset(dataset_id):
return datalab.bigquery.Dataset(dataset_id, context=TestCases._create_context())
@staticmethod
def _create_table_list_result():
return {
'tables': [
{
'type': 'TABLE',
'tableReference': {'projectId': 'test', 'datasetId': 'testds', 'tableId': 'testTable1'}
},
{
'type': 'VIEW',
'tableReference': {'projectId': 'test', 'datasetId': 'testds', 'tableId': 'testView1'}
},
{
'type': 'TABLE',
'tableReference': {'projectId': 'test', 'datasetId': 'testds', 'tableId': 'testTable2'}
}
]
}
@staticmethod
def _create_table_list_empty_result():
return {
'tables': []
}
@staticmethod
def _create_data_frame():
data = {
'some': [
0, 1, 2, 3
],
'column': [
'r0', 'r1', 'r2', 'r3'
],
'headers': [
10.0, 10.0, 10.0, 10.0
]
}
return pandas.DataFrame(data)
@staticmethod
def _create_inferred_schema(extra_field=None):
schema = [
{'name': 'some', 'type': 'INTEGER'},
{'name': 'column', 'type': 'STRING'},
{'name': 'headers', 'type': 'FLOAT'},
]
if extra_field:
schema.append({'name': extra_field, 'type': 'INTEGER'})
return schema
@staticmethod
def _create_table_with_schema(schema, name='test:testds.testTable0'):
return datalab.bigquery.Table(name, TestCases._create_context()).create(schema)
class _uuid(object):
@property
def hex(self):
return '#'
@staticmethod
def _create_uuid():
return TestCases._uuid()
| apache-2.0 |
Yurlungur/FLRW | plot_all_variables.py | 1 | 2036 | #!/usr/bin/env python2
# Author: Jonah Miller ([email protected])
# Time-stamp: <2013-12-14 14:15:26 (jonah)>
# This is a companion program to my FLRW simulator. It takes a data
# file and generates a plot of the scale factor, its derivative, the
# density, and the pressure of the matter.
# Call the program with
# python2 plot_all_variables.py filename.dat
# Imports
# ----------------------------------------------------------------------
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
# ----------------------------------------------------------------------
# Parameters for plots
# ----------------------------------------------------------------------
xlabel = "Cosmological time. (geometrized units)"
my_linewidth = 5
fontsize = 20
# ----------------------------------------------------------------------
def load_data(filename):
"""
Takes a file name as a string and extracts the simulation data
from it. Returns a tuple of arrays:
    (times, a_values, rho_values, p_values)
"""
with open(filename,'r') as f:
data = np.loadtxt(filename).transpose()
times = data[0]
a_values = data[1]
rho_values = data[2]
p_values = data[3]
return times,a_values,rho_values,p_values
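# Note (added for clarity): load_data above assumes a plain whitespace-delimited text
# file whose columns are, in order: cosmological time, scale factor a, density rho,
# and pressure p. This is inferred from the indexing of np.loadtxt(...).transpose().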
def plot_data(times,a_values,rho_values,p_values):
"""
    Takes the times, a_values, rho_values, and p_values
and makes a nice plot out of them. Takes labels, etc. into account.
"""
mpl.rcParams.update({'font.size': fontsize})
lines = [plt.plot(times,y_set,linewidth=my_linewidth)
for y_set in [a_values,rho_values,p_values]]
plt.legend(["a",r'$\rho$',"p"])
plt.xlabel(xlabel)
plt.show()
return
def plot_file(filename):
"Plots the data in a file."
times,a_values,rho_values,p_values = load_data(filename)
plot_data(times,a_values,rho_values,p_values)
return
if __name__ == "__main__":
for filename in sys.argv[1:]:
plot_file(filename)
| mit |
CopyChat/Plotting | Python/TestCode/colormaps_reference.py | 4 | 3411 | """
Reference for colormaps included with Matplotlib.
This reference example shows all colormaps included with Matplotlib. Note that
any colormap listed here can be reversed by appending "_r" (e.g., "pink_r").
These colormaps are divided into the following categories:
Sequential:
These colormaps are approximately monochromatic colormaps varying smoothly
between two color tones---usually from low saturation (e.g. white) to high
saturation (e.g. a bright blue). Sequential colormaps are ideal for
representing most scientific data since they show a clear progression from
low-to-high values.
Diverging:
These colormaps have a median value (usually light in color) and vary
smoothly to two different color tones at high and low values. Diverging
colormaps are ideal when your data has a median value that is significant
(e.g. 0, such that positive and negative values are represented by
different colors of the colormap).
Qualitative:
These colormaps vary rapidly in color. Qualitative colormaps are useful for
choosing a set of discrete colors. For example::
color_list = plt.cm.Set3(np.linspace(0, 1, 12))
gives a list of RGB colors that are good for plotting a series of lines on
a dark background.
Miscellaneous:
Colormaps that don't fit into the categories above.
"""
import numpy as np
import matplotlib.pyplot as plt
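# Illustrative snippet (added; not part of the original reference): as the docstring
# notes, any colormap can be reversed by appending "_r", and a qualitative colormap
# can be sampled to get a list of discrete RGBA colors.
reversed_cmap = plt.get_cmap('pink_r')           # reversed version of 'pink'
color_list = plt.cm.Set3(np.linspace(0, 1, 12))  # 12 discrete colors from 'Set3'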
cmaps = [('Sequential', ['binary', 'Blues', 'BuGn', 'BuPu', 'gist_yarg',
'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool', 'copper',
'gist_gray', 'gist_heat', 'gray', 'hot', 'pink',
'spring', 'summer', 'winter']),
('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'seismic']),
('Qualitative', ['Accent', 'Dark2', 'hsv', 'Paired', 'Pastel1',
'Pastel2', 'Set1', 'Set2', 'Set3', 'spectral']),
('Miscellaneous', ['gist_earth', 'gist_ncar', 'gist_rainbow',
'gist_stern', 'jet', 'brg', 'CMRmap', 'cubehelix',
'gnuplot', 'gnuplot2', 'ocean', 'rainbow',
'terrain', 'flag', 'prism'])]
nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
def plot_color_gradients(cmap_category, cmap_list):
fig, axes = plt.subplots(nrows=nrows)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
axes[0].set_title(cmap_category + ' colormaps', fontsize=14)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
for cmap_category, cmap_list in cmaps:
plot_color_gradients(cmap_category, cmap_list)
plt.show()
| gpl-3.0 |
KNMI/VERCE | scigateway-api/src/scigateway_app.py | 2 | 3047 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import json
from flask import request, send_file, Flask
from scigateway_services import mtensor
from scigateway_services.wfs_input_generator.input_file_generator import InputFileGenerator
from pymongo import MongoClient
import sys
DATA_PATH = "../mt/"
ROOT_URL = "/"
app = Flask("scigateway-api")
mtstore = mtensor.MtStore(DATA_PATH)
uri=sys.argv[1]
db=MongoClient(uri,maxPoolSize=100)["verce-prov"]
def str_to_bool_pass(value):
if value == "true":
return True
if value == "false":
return False
return value
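# Example (illustrative, added for clarity): str_to_bool_pass("true") returns True,
# str_to_bool_pass("false") returns False, and any other value (e.g. "4" or 7.5) is
# returned unchanged. The /solver/par-file endpoint below uses this to coerce form
# field values before passing them to the InputFileGenerator configuration.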
@app.route(ROOT_URL)
def start():
return "VERCE scigateway-api service."
@app.route(ROOT_URL + "version")
def version():
"""
Return the version string of the webservice.
"""
return "0.0.3"
@app.route(ROOT_URL + "mt/components-image")
def components_image():
mrr = request.args['mrr']
mtt = request.args['mtt']
mpp = request.args['mpp']
mrt = request.args['mrt']
mrp = request.args['mrp']
mtp = request.args['mtp']
filename = mtstore.produceImage(id, mrr=float(mrr), mtt=float(mtt), mpp=float(mpp), mrt=float(mrt),
mrp=float(mrp), mtp=float(mtp))
return send_file(filename, mimetype='image/png')
@app.route(ROOT_URL + "mt/nodal-plane-image")
def nodal_plane_image():
strike = request.args['strike']
dip = request.args['dip']
rake = request.args['rake']
filename = mtstore.produceImage(strike=float(strike), dip=float(dip), rake=float(rake))
return send_file(filename, mimetype='image/png')
@app.route(ROOT_URL + "solver/par-file/<solver>", methods=['POST'])
def solver_par_file(solver):
solver_conf = json.loads(str(request.form["jsondata"]))
gen = InputFileGenerator()
fields = solver_conf["fields"]
for x in fields:
gen.add_configuration({x["name"]: str_to_bool_pass(x["value"])})
par_file = gen.write_par_file(format=solver.upper())
return par_file, 200, {'Content-Type': 'application/octet-stream'}
@app.route(ROOT_URL + "solver/<solverId>")
def solver_config(solverId):
solver = db['solver']
try:
solver = solver.find_one({"_id": solverId})
if (solver != None):
solver.update({"success": True})
userId = request.args["userId"][0] if "userId" in request.args else False
def userFilter(item):
return (not "users" in item) or (userId and userId in item["users"])
def velmodFilter(item):
item["velmod"] = filter(userFilter, item["velmod"])
return item
solver["meshes"] = map(velmodFilter, filter(userFilter, solver["meshes"]))
return json.dumps(solver), 200, {'Content-type': 'application/json'}
else:
return {"success": False, "error": "Solver " + solverId + " not Found"}
except Exception, e:
return {"success": False, "error": str(e)}
if __name__ == "__main__":
app.run(debug=True) | mit |
zorojean/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared Euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
dino-dna/donut | packages/regression/src/DonutLearner.py | 1 | 1836 | import json
import os
import sys
import numpy as np
from scipy.optimize import differential_evolution
from sklearn import linear_model
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.neighbors import KNeighborsRegressor
def log(msg):
if os.getenv('DEBUG') is not None:
sys.stderr.write(f'donut:py_regression: {msg}\n')
class DonutLearner:
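  # Note (added for clarity): both learners follow the same pattern -- fit a
  # scikit-learn regressor on (X, Y), then use scipy's differential_evolution to
  # search the 5-dimensional unit hypercube for the input that maximizes the
  # predicted score (by minimizing its negative). from_stdin() expects a JSON
  # payload on stdin shaped roughly like
  #   {"learners": ["knn"], "X": [[0.1, 0.2, 0.3, 0.4, 0.5], ...], "Y": [0.7, ...]}
  # (this example payload is illustrative, not taken from the original project).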
@staticmethod
def knn(X, Y):
neigh = KNeighborsRegressor()
neigh.fit(X, Y)
def explore(x):
score = -1 * neigh.predict([x])
return score
minimized = differential_evolution(explore, ((0, 1), (0, 1), (0, 1), (0, 1), (0, 1)))
return {
'X_min': list(minimized.x),
'score': neigh.score(X, Y)
}
@staticmethod
def ridge_regression_with_sim_ann(X, Y):
model = make_pipeline(PolynomialFeatures(2), linear_model.Ridge())
reg = model.fit(X, Y)
def explore(x):
score = -1 * reg.predict([x])
return score
minimized = differential_evolution(explore, ((0, 1), (0, 1), (0, 1), (0, 1), (0, 1)))
return {
'X_min': list(minimized.x),
'score': reg.score(X, Y)
}
@staticmethod
def from_stdin():
log('reading input')
data = ""
for line in sys.stdin:
data += line
try:
request = json.loads(data)
break
except:
pass
log('input read complete')
response = {}
for learner in request['learners']:
X = request['X']
log(f'execing learner ({len(X)} records): {learner}')
reg = getattr(DonutLearner, learner)(X, request['Y'])
response[learner] = reg
log('writing response')
print(json.dumps(response)) # send to stdout for parsing
if __name__ == "__main__":
log('Launching DonutLearner')
DonutLearner.from_stdin()
log('complete.')
exit(0)
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/indexes/period/test_period.py | 4 | 29718 | import pytest
import numpy as np
from numpy.random import randn
from datetime import timedelta
import pandas as pd
from pandas.util import testing as tm
from pandas import (PeriodIndex, period_range, notnull, DatetimeIndex, NaT,
Index, Period, Int64Index, Series, DataFrame, date_range,
offsets, compat)
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
tm.assert_index_equal(result, Index(idx.asi8))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
pytest.raises(ValueError, idx.astype, str)
pytest.raises(ValueError, idx.astype, float)
pytest.raises(ValueError, idx.astype, 'timedelta64')
pytest.raises(ValueError, idx.astype, 'timedelta64[ns]')
def test_pickle_compat_construction(self):
pass
def test_pickle_round_trip(self):
for freq in ['D', 'M', 'A']:
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].asfreq('H', how='start'), method) == 1
assert idx.get_loc(idx[1].to_timestamp(), method) == 1
assert idx.get_loc(idx[1].to_timestamp()
.to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
idx = pd.period_range('2000-01-01', periods=5)[::2]
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with pytest.raises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
i = self.create_index()
cond = [False] + [True] * (len(i) - 1)
klasses = [list, tuple, np.array, Series]
expected = pd.PeriodIndex([pd.NaT] + i[1:].tolist(), freq='D')
for klass in klasses:
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
tm.assert_index_equal(res, exp)
assert res.freqstr == 'D'
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
tm.assert_frame_equal(df, df.loc[idx])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
tm.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
tm.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna(
pd.Period('2011-01-01', freq='D')), exp)
def test_no_millisecond_field(self):
with pytest.raises(AttributeError):
DatetimeIndex.millisecond
with pytest.raises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_freq(self):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
assert pi.dtype_str == 'period[M]'
assert pi.dtype_str == str(pi.dtype)
pi = pd.PeriodIndex([], freq='3M')
assert pi.dtype_str == 'period[3M]'
assert pi.dtype_str == str(pi.dtype)
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'dayofyear',
'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
assert expected == result
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_asobject_like(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq('M'))
assert not index.is_(index.asfreq('A'))
assert not index.is_(index - 2)
assert not index.is_(index - 0)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
assert Period('2007-01', freq='M') in rng
assert not Period('2007-01', freq='D') in rng
assert not Period('2007-01', freq='2M') in rng
def test_contains_nat(self):
# see gh-13582
idx = period_range('2007-01', freq='M', periods=10)
assert pd.NaT not in idx
assert None not in idx
assert float('nan') not in idx
assert np.nan not in idx
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert pd.NaT in idx
assert None in idx
assert float('nan') in idx
assert np.nan in idx
def test_periods_number_check(self):
with pytest.raises(ValueError):
period_range('2011-1-1', '2012-1-1', 'B')
def test_start_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='M')
tm.assert_index_equal(index.end_time, expected_index)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT',
'2011-05'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_ndarray_compat_properties(self):
if compat.is_platform_32bit():
pytest.skip("skipping on 32bit")
super(TestPeriodIndex, self).test_ndarray_compat_properties()
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
def test_negative_ordinals(self):
Period(ordinal=-1000, freq='A')
Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_index_equal(idx1, idx2)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D', name='name')
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
tm.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s['05Q4'] == s[2]
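        # i.e. '05Q4' is interpreted as the fourth quarter of 2005, which matches
        # pi[2] ('4Q05') above, so positional s[2] and label-based s['05Q4'] agree.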
def test_numpy_repeat(self):
index = period_range('20010101', periods=2)
expected = PeriodIndex([Period('2001-01-01'), Period('2001-01-01'),
Period('2001-01-02'), Period('2001-01-02')])
tm.assert_index_equal(np.repeat(index, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, index, 2, axis=1)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
assert isinstance(result[0], Period)
assert result[0].freq == index.freq
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
assert index[:0].is_full
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
assert isinstance(s.index.levels[0], PeriodIndex)
assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == 'M'
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
tm.assert_index_equal(result, expected)
result = index.map(lambda x: x.ordinal)
exp = Index([x.ordinal for x in index])
tm.assert_index_equal(result, exp)
| mit |
AlexanderFabisch/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 17 | 25508 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
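    # With this callable, metric(x, x) == 5, so the diagonal entry [0, 0] must be 5
    # rather than being left uncalculated and set to 0, which would only be valid
    # for a strict metric.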
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the
        # data has been previously normalized by its L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
r24mille/think_stats | chapter_two/conditional_ans.py | 1 | 2761 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import Pmf
import descriptive
import matplotlib.pyplot as pyplot
import myplot
import risk_ans
def ConditionPmf(pmf, filter_func, name='conditional'):
"""Computes a conditional PMF based on a filter function.
Args:
pmf: Pmf object
filter_func: callable that takes a value from the Pmf and returns
a boolean
name: string name for the new pmf
Returns:
new Pmf object
"""
cond_pmf = pmf.Copy(name)
vals = [val for val in pmf.Values() if filter_func(val)]
for val in vals:
cond_pmf.Remove(val)
cond_pmf.Normalize()
return cond_pmf
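# Worked example of what ConditionPmf does (plain numbers rather than the Pmf
# API): values for which filter_func returns True are removed and the
# remaining mass is renormalized.  Starting from {38: 0.2, 39: 0.3, 40: 0.4,
# 41: 0.1} with filter_func(x) = (x < 40), the entries 38 and 39 are dropped
# and the rest rescales to {40: 0.4/0.5, 41: 0.1/0.5} = {40: 0.8, 41: 0.2},
# which is exactly the conditioning used by ConditionOnWeeks below.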
def ConditionOnWeeks(pmf, week=39, name='conditional'):
"""Computes a PMF conditioned on the given number of weeks.
Args:
pmf: Pmf object
week: the current duration of the pregnancy
name: string name for the new pmf
Returns:
new Pmf object
"""
def filter_func(x):
return x < week
cond = ConditionPmf(pmf, filter_func, name)
return cond
def MakeFigure(firsts, others):
"""Makes a figure showing...
"""
weeks = range(35, 46)
# probs is a map from table name to list of conditional probabilities
probs = {}
for table in [firsts, others]:
name = table.pmf.name
probs[name] = []
for week in weeks:
cond = ConditionOnWeeks(table.pmf, week)
prob = cond.Prob(week)
print week, prob, table.pmf.name
probs[name].append(prob)
# make a plot with one line for each table
pyplot.clf()
for name, ps in probs.iteritems():
pyplot.plot(weeks, ps, label=name)
print name, ps
myplot.Save(root='conditional',
xlabel='weeks',
ylabel=r'Prob{x $=$ weeks | x $\geq$ weeks}',
title='Conditional Probability')
def RelativeRisk(first, others, week=38):
"""Computes relative risk of the conditional prob of having
a baby for each week, first babies compared to others.
first: Pregnancies table
others: Pregnancies table
    week: week of pregnancy at which to condition (default 38)
"""
print type(first)
first_cond = ConditionOnWeeks(first.pmf, week, 'first babies')
other_cond = ConditionOnWeeks(others.pmf, week, 'others')
risk_ans.ComputeRelativeRisk(first_cond, other_cond)
def main():
pool, firsts, others = descriptive.MakeTables()
RelativeRisk(firsts, others)
MakeFigure(firsts, others)
if __name__ == "__main__":
main()
| gpl-3.0 |
soylentdeen/BlurryApple | Tools/infl_func_sub.py | 1 | 2579 | import pyfits
import matplotlib.pyplot as pyplot
import scipy
import numpy
datadir = '/home/deen/Data/GRAVITY/InfluenceFunctions/IFs_nov82013/'
fig = pyplot.figure(0, figsize=(20, 15))
fig.clear()
influence_functions = []
N_act = 60
rebinned_img_width = 64
rebinned_IF_cube = numpy.zeros((N_act, rebinned_img_width, rebinned_img_width))
for i in range(60):
print i
ax = fig.add_axes([0.1+(i/8)*0.1, 0.1+(i%8)*0.1, 0.1, 0.1])
plus_file = datadir+'poke+'+str(i)+'.fits'
minus_file = datadir+'poke-'+str(i)+'.fits'
plus = pyfits.getdata(plus_file)
minus = pyfits.getdata(minus_file)
plus_hdr = pyfits.getheader(plus_file)
minus_hdr = pyfits.getheader(minus_file)
img_shape = plus.shape
nonapval = plus_hdr["NONAPVAL"]
plus_mask = numpy.not_equal(plus, nonapval)
minus_mask = numpy.not_equal(minus, nonapval)
data_mask = numpy.all(numpy.vstack((plus_mask.ravel(),
minus_mask.ravel())), axis=0).reshape(img_shape)
y_beg = min(numpy.where(data_mask==True)[0])
y_end = max(numpy.where(data_mask==True)[0])
x_beg = min(numpy.where(data_mask==True)[1])
x_end = max(numpy.where(data_mask==True)[1])
subtraction = numpy.zeros(img_shape)
subtraction[data_mask] = plus[data_mask] - minus[data_mask]
influence_functions.append(subtraction/10000.0)
bin_size = max([y_end-y_beg, x_end-x_beg])/rebinned_img_width
if_img = subtraction/10000.0 # Convert from Angstroems to microns
#"""
for xbin in range(rebinned_img_width):
for ybin in range(rebinned_img_width):
N_sum = 0
for x in range(x_beg + xbin*bin_size, x_beg+(xbin+1)*bin_size):
for y in range(y_beg+ybin*bin_size, y_beg+(ybin+1)*bin_size):
if (x < img_shape[1] and y < img_shape[0] and
data_mask[y,x]==True):
rebinned_IF_cube[i, ybin, xbin] += if_img[y,x]
N_sum += 1
if N_sum > 0:
rebinned_IF_cube[i, ybin, xbin] /= N_sum
else:
rebinned_IF_cube[i, ybin, xbin] = 0.0
ax.imshow(rebinned_IF_cube[i])
ax.set_yticks([])
ax.set_xticks([])
#"""
IFs = numpy.array(influence_functions)
IF_cube_hdu = pyfits.PrimaryHDU( rebinned_IF_cube.astype(numpy.float32))
IF_cube_hdu.writeto('IF_cube_zeros.fits', clobber=True)
#IF_cube_hdu = pyfits.PrimaryHDU( IFs.astype(numpy.float32))
#IF_cube_hdu.writeto('IF_cube_HR.fits', clobber=True)
#pyfits.writeto('IF_cube.fits', IFs)
#fig.show()
#fig.savefig("IF_cube.png")
| gpl-2.0 |
YzPaul3/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_bernoulli_synthetic_data_GBM_medium.py | 8 | 3234 | from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o import H2OFrame
import numpy as np
import numpy.random
import scipy.stats
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def bernoulli_synthetic_data_gbm_medium():
# Generate training dataset (adaptation of http://www.stat.missouri.edu/~speckman/stat461/boost.R)
train_rows = 10000
train_cols = 10
# Generate variables V1, ... V10
X_train = np.random.randn(train_rows, train_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_train = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_train,X_train).tolist()]])
# Train scikit gbm
# TODO: grid-search
distribution = "bernoulli"
ntrees = 150
min_rows = 1
max_depth = 2
learn_rate = .01
nbins = 20
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(X_train,y_train)
# Generate testing dataset
test_rows = 2000
test_cols = 10
# Generate variables V1, ... V10
X_test = np.random.randn(test_rows, test_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_test = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_test,X_test).tolist()]])
# Score (AUC) the scikit gbm model on the test data
auc_sci = roc_auc_score(y_test, gbm_sci.predict_proba(X_test)[:,1])
# Compare this result to H2O
xtrain = np.transpose(X_train).tolist()
ytrain = y_train.tolist()
xtest = np.transpose(X_test).tolist()
ytest = y_test.tolist()
train_h2o = H2OFrame(list(zip(*[ytrain]+xtrain)))
test_h2o = H2OFrame(list(zip(*[ytest]+xtest)))
train_h2o["C1"] = train_h2o["C1"].asfactor()
test_h2o["C1"] = test_h2o["C1"].asfactor()
gbm_h2o = H2OGradientBoostingEstimator(distribution=distribution,
ntrees=ntrees,
min_rows=min_rows,
max_depth=max_depth,
learn_rate=learn_rate,
nbins=nbins)
gbm_h2o.train(x=list(range(1,train_h2o.ncol)), y="C1", training_frame=train_h2o)
gbm_perf = gbm_h2o.model_performance(test_h2o)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert abs(auc_h2o - auc_sci) < 1e-2, "h2o (auc) performance degradation, with respect to scikit. h2o auc: {0} " \
"scickit auc: {1}".format(auc_h2o, auc_sci)
if __name__ == "__main__":
pyunit_utils.standalone_test(bernoulli_synthetic_data_gbm_medium)
else:
bernoulli_synthetic_data_gbm_medium()
| apache-2.0 |
JacekPierzchlewski/RxCS | examples/dictionaries/dict_IDFT_ex0.py | 1 | 2391 | """
This script is an example of how to use the Inverse Discrete Fourier Transform
(IDFT) Dictionary module. |br|
In this example a signal with 1 cosine tone is generated. |br|
After the generation, the signal is plotted in the time domain.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
1.0 | 30-MAY-2014 : * Version 1.0 released. |br|
1.0-r1 | 23-FEB-2015 : * Header is added. |br|
2.0 | 20-AUG-2015 : * Version 2.0 released
(adjusted to v2.0 of the dictionary generator) |br|
2.1 | 23-MAR-2017 : * Symmetrical frequency distribution is set
*License*:
BSD 2-Clause
"""
from __future__ import division
import rxcs
import numpy as np
import matplotlib.pyplot as plt
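# For intuition only: the example below relies on the IDFT dictionary being a
# matrix whose k-th row is a complex exponential exp(1j*2*pi*f_k*t), so that a
# conjugate-symmetric pair of Fourier coefficients maps onto a real cosine.
# The helper below is a minimal, self-contained numpy sketch of such a matrix;
# the exact tone ordering, symmetry layout and scaling used by
# rxcs.cs.dict.IDFT may differ, so treat it as illustrative rather than as the
# module's actual construction.
def _idft_dict_sketch(fR, tS, fDelta, nTones):
    """Return (vT, mDict) with mDict[k, :] = exp(1j*2*pi*(k+1)*fDelta*vT)."""
    vT = np.arange(int(round(fR * tS))) / fR              # time vector [s]
    vF = fDelta * np.arange(1, nTones + 1)                # tone frequencies [Hz]
    mDict = np.exp(1j * 2 * np.pi * np.outer(vF, vT))     # one row per tone
    return vT, mDict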
def _dict_IDFT_ex0():
# Things on the table:
IDFT = rxcs.cs.dict.IDFT() # IDFT dictionary generator
# Configure the IDF generator
IDFT.tS = 1e-3 # Time of the dictionary is 1 ms
IDFT.fR = 40e3 # Representation sampling frequency is 40 kHz
IDFT.fDelta = 1e3 # The frequency separation between tones
IDFT.nTones = 10 # The number of tones in the dictionary
IDFT.bFreqSym = 1 # Symmetrical frequency distribution
IDFT.run() # Generate the dictionary
# -----------------------------------------------------------------
# Generate the signal using the dictionary
# Vector with Fourier coefficients
vFcoef = np.zeros((1,20)).astype(complex)
vFcoef[0, 0] = 1
vFcoef[0, 19] = 1
# Get the dictionary matrix
mIDFT = IDFT.mDict
# Generate a signal and change its shape to a single vector
vSig = np.real(np.dot(vFcoef,mIDFT))
vSig.shape = (vSig.size,)
# -----------------------------------------------------------------
# Plot signal in the time domain
vT = IDFT.vT # Get the time vector
hFig1 = plt.figure(1)
hSubPlot1 = hFig1.add_subplot(111)
hSubPlot1.grid(True)
hSubPlot1.set_title('Signal')
hSubPlot1.set_xlabel('Time [s]')
hSubPlot1.plot(vT, vSig)
hSubPlot1.set_xlim(min(vT), max(vT))
hSubPlot1.set_ylim(-1.1, 1.1)
plt.show(block=True)
# =====================================================================
# Trigger when start as a script
# =====================================================================
if __name__ == '__main__':
_dict_IDFT_ex0()
| bsd-2-clause |
kiyoto/statsmodels | statsmodels/examples/ex_feasible_gls_het.py | 34 | 4267 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during development.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimates the model for a given weight matrix;
here we also want to estimate the weight matrix itself, by two-step or iterative WLS.
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
There might be something fishy with the example, but I don't see it.
Or maybe it's supposed to be this way because in the first case I don't
include a constant and in the second case I include some of the same
regressors as in the main equation.
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
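# The docstring above describes the feasible-GLS idea: fit OLS first, model
# the error variance as a function of the variance regressors, then refit by
# WLS using the fitted inverse variances as weights.  The helper below is a
# minimal two-step sketch of that idea with plain OLS/WLS; the exponential
# variance model var(e_i) = exp(z_i'gamma) is an assumption made for the
# sketch, not necessarily what GLSHet uses internally.
def _two_step_fwls_sketch(y, X, Z):
    """Two-step feasible WLS: OLS, variance regression on Z, then WLS."""
    ols_res = OLS(y, X).fit()
    # regress log squared residuals on Z (small constant guards against log(0))
    log_e2 = np.log(ols_res.resid ** 2 + 1e-12)
    var_hat = np.exp(OLS(log_e2, Z).fit().fittedvalues)
    # statsmodels WLS takes weights proportional to the inverse variance
    return WLS(y, X, weights=1.0 / var_hat).fit()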
examples = ['ex1']
if 'ex1' in examples:
#from tut_ols_wls
nsample = 1000
sig = 0.5
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, (x1-5)**2, np.ones(nsample)]
np.random.seed(0)#9876789) #9876543)
beta = [0.5, -0.015, 1.]
y_true2 = np.dot(X, beta)
w = np.ones(nsample)
w[nsample*6//10:] = 4 #Note this is the squared value
#y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
#y2[nsample*6/10:] = y_true2[nsample*6/10:] + sig*4. * np.random.normal(size=nsample*4/10)
y2 = y_true2 + sig*np.sqrt(w)* np.random.normal(size=nsample)
X2 = X[:,[0,2]]
X2 = X
res_ols = OLS(y2, X2).fit()
print('OLS beta estimates')
print(res_ols.params)
print('OLS stddev of beta')
print(res_ols.bse)
print('\nWLS')
mod0 = GLSHet2(y2, X2, exog_var=w)
res0 = mod0.fit()
print('new version')
mod1 = GLSHet(y2, X2, exog_var=w)
res1 = mod1.iterative_fit(2)
print('WLS beta estimates')
print(res1.params)
print(res0.params)
print('WLS stddev of beta')
print(res1.bse)
#compare with previous version GLSHet2, refactoring check
#assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
#this fails ??? more iterations? different starting weights?
print(res1.model.weights/res1.model.weights.max())
#why is the error so small in the estimated weights ?
assert_almost_equal(res1.model.weights/res1.model.weights.max(), 1./w, 14)
print('residual regression params')
print(res1.results_residual_regression.params)
print('scale of model ?')
print(res1.scale)
print('unweighted residual variance, note unweighted mean is not zero')
print(res1.resid.var())
#Note weighted mean is zero:
#(res1.model.weights * res1.resid).mean()
doplots = False
if doplots:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
#z = (w[:,None] == [1,4]).astype(float) #dummy variable
z = (w[:,None] == np.unique(w)).astype(float) #dummy variable
mod2 = GLSHet(y2, X2, exog_var=z)
res2 = mod2.iterative_fit(2)
print(res2.params)
import statsmodels.api as sm
z = sm.add_constant(w)
mod3 = GLSHet(y2, X2, exog_var=z)
res3 = mod3.iterative_fit(8)
print(res3.params)
print("np.array(res3.model.history['ols_params'])")
print(np.array(res3.model.history['ols_params']))
print("np.array(res3.model.history['self_params'])")
print(np.array(res3.model.history['self_params']))
print(np.unique(res2.model.weights)) #for discrete z only, only a few uniques
print(np.unique(res3.model.weights))
if doplots:
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/frame/test_arithmetic.py | 1 | 59527 | from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
class DummyElement:
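    """Scalar-like stand-in that pairs a value with a declared numpy dtype;
    used by test_binop_other below to exercise DataFrame arithmetic against
    scalars of many different dtypes."""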
def __init__(self, value, dtype):
self.value = value
self.dtype = np.dtype(dtype)
def __array__(self):
return np.array(self.value, dtype=self.dtype)
def __str__(self) -> str:
return f"DummyElement({self.value}, {self.dtype})"
def __repr__(self) -> str:
return str(self)
def astype(self, dtype, copy=False):
self.dtype = dtype
return self
def view(self, dtype):
return type(self)(self.value.view(dtype), dtype)
def any(self, axis=None):
return bool(self.value)
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = DataFrame(np.arange(6).reshape((3, 2)))
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"])
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({"a": arr})
df2 = DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = DataFrame({"A": ii, "B": ii})
ser = Series([0, 0])
res = df.eq(ser, axis=0)
expected = DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = Series(arr)
df = DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
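        # sizing the frame past _MIN_ELEMENTS is what pushes evaluation onto
        # the numexpr code path mentioned above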
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = Series(tdi)
df = DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = DataFrame({"A": dti, "B": ser})
other = DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = DataFrame(
{
"A": Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype={"C": None})
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = {"B": "uint64", "C": None}
elif op in ["__add__", "__mul__"]:
dtype = {"C": None}
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype={"C": None})
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = Series([], dtype=object)
df_len0 = DataFrame(columns=["A", "B"])
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = DataFrame({"A": [1, 1], "B": [1, 1]})
expected = DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"value, dtype",
[
(1, "i8"),
(1.0, "f8"),
(2 ** 63, "f8"),
(1j, "complex128"),
(2 ** 63, "complex128"),
(True, "bool"),
(np.timedelta64(20, "ns"), "<m8[ns]"),
(np.datetime64(20, "ns"), "<M8[ns]"),
],
)
@pytest.mark.xfail(reason="GH38630", strict=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.mod,
operator.pow,
],
ids=lambda x: x.__name__,
)
def test_binop_other(self, op, value, dtype):
skip = {
(operator.truediv, "bool"),
(operator.pow, "bool"),
}
e = DummyElement(value, dtype)
s = DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
invalid = {
(operator.pow, "<M8[ns]"),
(operator.mod, "<M8[ns]"),
(operator.truediv, "<M8[ns]"),
(operator.mul, "<M8[ns]"),
(operator.add, "<M8[ns]"),
(operator.pow, "<m8[ns]"),
(operator.mul, "<m8[ns]"),
(operator.sub, "bool"),
(operator.mod, "complex128"),
}
if (op, dtype) in invalid:
warn = None
if (dtype == "<M8[ns]" and op == operator.add) or (
dtype == "<m8[ns]" and op == operator.mul
):
msg = None
elif dtype == "complex128":
msg = "ufunc 'remainder' not supported for the input types"
warn = UserWarning # "evaluating in Python space because ..."
elif op is operator.sub:
msg = "numpy boolean subtract, the `-` operator, is "
warn = UserWarning # "evaluating in Python space because ..."
else:
msg = (
f"cannot perform __{op.__name__}__ with this "
"index type: (DatetimeArray|TimedeltaArray)"
)
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(warn):
op(s, e.value)
elif (op, dtype) in skip:
msg = "operator '.*' not implemented for .* dtypes"
with pytest.raises(NotImplementedError, match=msg):
with tm.assert_produces_warning(UserWarning):
# "evaluating in Python space because ..."
op(s, e.value)
else:
# FIXME: Since dispatching to Series, this test no longer
# asserts anything meaningful
with tm.assert_produces_warning(None):
result = op(s, e.value).dtypes
expected = op(s, value).dtypes
tm.assert_series_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = Series(dtype=np.float64)
result = df + ser
expected = DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
# Automatic alignment for comparisons deprecated
result = df == ser
expected = DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = DataFrame(df.values.view("M8[ns]"), columns=df.columns)
with tm.assert_produces_warning(FutureWarning):
# Automatic alignment for comparisons deprecated
result = df2 == ser
expected = DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = DataFrame(columns=["A", "B"], dtype=np.float64)
ser = Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": Series(["A", 1.2, np.nan]),
}
df = DataFrame(data)
result = df.sum(axis=1)
expected = Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype={"C": None})
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype={"C": None})
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype={"C": None})
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = {"A": 'int64', "B": 'float64', "C":
# 'int64', "D": 'int64'})
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = {"A": 'int32', "B": 'float64', "C":
# 'int32', "D": 'int64'})
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype={"C": None})
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
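    # Editorial note (not part of the original test): the asymmetry above is
    # intentional. A (1, 2) row vector like ``b_r`` broadcasts against the
    # (3, 2) frame just as it would against the underlying ndarray, while the
    # (2, 1) column vector ``b_c`` cannot be coerced: pandas raises the
    # "shape must be" error, and plain NumPy broadcasting of (3, 2) with
    # (2, 1) fails as well.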
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
df = df_orig.copy()
df2 = df
df["A"] += 1.5
expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
@pytest.mark.parametrize(
"op",
[
"add",
"and",
"div",
"floordiv",
"mod",
"mul",
"or",
"pow",
"sub",
"truediv",
"xor",
],
)
def test_inplace_ops_identity2(self, op):
if op == "div":
return
df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
operand = 2
if op in ("and", "or", "xor"):
# cannot use floats for boolean ops
df["a"] = [True, False, True]
df_copy = df.copy()
iop = f"__i{op}__"
op = f"__{op}__"
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
tm.assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
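    # Editorial note: for op == "add" the strings built above are
    # iop == "__iadd__" and op == "__add__", so the body effectively compares
    # df.__iadd__(operand) (in place, same object id) against
    # df_copy.__add__(operand).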
def test_alignment_non_pandas(self):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops.align_method_FRAME
for val in [
[1, 2, 3],
(1, 2, 3),
np.array([1, 2, 3], dtype=np.int64),
range(1, 4),
]:
expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)
tm.assert_frame_equal(align(df, val, "index")[1], expected)
expected = DataFrame(
{"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index
)
tm.assert_frame_equal(align(df, val, "columns")[1], expected)
# length mismatch
msg = "Unable to coerce to Series, length must be 3: given 2"
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(
align(df, val, "index")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
tm.assert_frame_equal(
align(df, val, "columns")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
# shape mismatch
msg = "Unable to coerce to DataFrame, shape must be"
val = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.zeros((3, 3, 3))
msg = re.escape(
"Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
)
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
def test_no_warning(self, all_arithmetic_operators):
df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with tm.assert_produces_warning(None):
getattr(df, all_arithmetic_operators)(b)
def test_dunder_methods_binary(self, all_arithmetic_operators):
# GH#??? frame.__foo__ should only accept one argument
df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with pytest.raises(TypeError, match="takes 2 positional arguments"):
getattr(df, all_arithmetic_operators)(b, 0)
def test_align_int_fill_bug(self):
# GH#910
X = np.arange(10 * 10, dtype="float64").reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1["0.X"] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
tm.assert_frame_equal(result, expected)
def test_pow_with_realignment():
# GH#32685 pow has special semantics for operating with null values
left = DataFrame({"A": [0, 1, 2]})
right = DataFrame(index=[0, 1, 2])
result = left ** right
expected = DataFrame({"A": [np.nan, 1.0, np.nan]})
tm.assert_frame_equal(result, expected)
# TODO: move to tests.arithmetic and parametrize
def test_pow_nan_with_zero():
left = DataFrame({"A": [np.nan, np.nan, np.nan]})
right = DataFrame({"A": [0, 0, 0]})
expected = DataFrame({"A": [1.0, 1.0, 1.0]})
result = left ** right
tm.assert_frame_equal(result, expected)
result = left["A"] ** right["A"]
tm.assert_series_equal(result, expected["A"])
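# Editorial note: the expectations above mirror scalar semantics, where any
# base raised to the power zero is one, NaN included:
#
#     >>> import numpy as np
#     >>> np.nan ** 0
#     1.0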
def test_dataframe_series_extension_dtypes():
# https://github.com/pandas-dev/pandas/issues/34311
df = DataFrame(np.random.randint(0, 100, (10, 3)), columns=["a", "b", "c"])
ser = Series([1, 2, 3], index=["a", "b", "c"])
expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)
expected = DataFrame(expected, columns=df.columns, dtype="Int64")
df_ea = df.astype("Int64")
result = df_ea + ser
tm.assert_frame_equal(result, expected)
result = df_ea + ser.astype("Int64")
tm.assert_frame_equal(result, expected)
def test_dataframe_blockwise_slicelike():
# GH#34367
arr = np.random.randint(0, 1000, (100, 10))
df1 = DataFrame(arr)
df2 = df1.copy()
df2.iloc[0, [1, 3, 7]] = np.nan
df3 = df1.copy()
df3.iloc[0, [5]] = np.nan
df4 = df1.copy()
df4.iloc[0, np.arange(2, 5)] = np.nan
df5 = df1.copy()
df5.iloc[0, np.arange(4, 7)] = np.nan
for left, right in [(df1, df2), (df2, df3), (df4, df5)]:
res = left + right
expected = DataFrame({i: left[i] + right[i] for i in left.columns})
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
"df, col_dtype",
[
(DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"),
(DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"),
],
)
def test_dataframe_operation_with_non_numeric_types(df, col_dtype):
# GH #22663
expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab"))
expected = expected.astype({"b": col_dtype})
result = df + Series([-1.0], index=list("a"))
tm.assert_frame_equal(result, expected)
def test_arith_reindex_with_duplicates():
# https://github.com/pandas-dev/pandas/issues/35194
df1 = DataFrame(data=[[0]], columns=["second"])
df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"])
result = df1 + df2
expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]])
def test_arith_list_of_arraylike_raise(to_add):
# GH 36702. Raise when trying to add list of array-like to DataFrame
df = DataFrame({"x": [1, 2], "y": [1, 2]})
msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
df + to_add
with pytest.raises(ValueError, match=msg):
to_add + df
def test_inplace_arithmetic_series_update():
# https://github.com/pandas-dev/pandas/issues/36373
df = DataFrame({"A": [1, 2, 3]})
series = df["A"]
vals = series._values
series += 1
assert series._values is vals
expected = DataFrame({"A": [2, 3, 4]})
tm.assert_frame_equal(df, expected)
| bsd-3-clause |
fyffyt/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
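# Illustrative usage sketch (editorial addition; the data and variable names
# below are invented). As implemented above, c_step forwards to _c_step and
# therefore returns a (location, covariance, det, support, dist) tuple:
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(42)
#     >>> X_demo = rng.randn(200, 2)
#     >>> X_demo[:20] += 8.0  # contaminate 10% of the samples
#     >>> location, covariance, det, support, dist = c_step(
#     ...     X_demo, n_support=120, random_state=rng)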
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
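# Illustrative usage sketch (editorial addition, reusing the invented X_demo
# from the c_step sketch above): draw 10 random initial supports and keep the
# 2 best candidate (location, covariance) estimates.
#
#     >>> locs, covs, supports, dists = select_candidates(
#     ...     X_demo, n_support=120, n_trials=10, select=2, random_state=0)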
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into a larger subset, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink n_best_tot first, then allocate.
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
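# Illustrative usage sketch (editorial addition, invented data): fast_mcd
# returns the raw estimates only, i.e. without the consistency correction and
# re-weighting performed by MinCovDet below.
#
#     >>> location, covariance, support, dist = fast_mcd(X_demo,
#     ...                                                random_state=0)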
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
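# Illustrative usage sketch (editorial addition, invented data): the estimator
# interface wraps fast_mcd and adds the correction and re-weighting steps.
#
#     >>> mcd = MinCovDet(random_state=0).fit(X_demo)
#     >>> mcd.location_    # re-weighted robust location, shape (n_features,)
#     >>> mcd.covariance_  # re-weighted robust covariance
#     >>> mcd.support_     # boolean mask of the observations kept as inliers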
| bsd-3-clause |
vene/marseille | marseille/struct_models.py | 1 | 24113 | """
Pystruct-compatible models.
"""
# Author: Vlad Niculae <[email protected]>
# License: BSD 3-clause
# AD3 is (c) Andre F. T. Martins, LGPLv3.0: http://www.cs.cmu.edu/~ark/AD3/
import warnings
import numpy as np
from sklearn.utils import compute_class_weight
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder, label_binarize
from pystruct.models import StructuredModel
from marseille.inference import loss_augment_unaries, CDCP_ILLEGAL_LINKS
from marseille.argdoc import DocLabel
from marseille.custom_logging import logging
from itertools import permutations
from ad3 import factor_graph as fg
def _binary_2d(y):
if y.shape[1] == 1:
y = np.column_stack([1 - y, y])
return y
def arg_f1_scores(Y_true, Y_pred, **kwargs):
macro = []
micro_true = []
micro_pred = []
for y_true, y_pred in zip(Y_true, Y_pred):
macro.append(f1_score(y_true, y_pred, **kwargs))
micro_true.extend(y_true)
micro_pred.extend(y_pred)
return np.mean(macro), f1_score(micro_true, micro_pred, **kwargs)
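# Illustrative sketch (editorial addition, invented labels): arg_f1_scores
# averages the per-document F1 scores (macro) and also pools every label
# across documents for a single micro F1.
#
#     >>> Y_true = [np.array([True, False, False]), np.array([False, True])]
#     >>> Y_pred = [np.array([True, False, True]), np.array([False, True])]
#     >>> macro_f1, micro_f1 = arg_f1_scores(Y_true, Y_pred,
#     ...                                    average='binary', pos_label=True)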
class BaseArgumentMixin(object):
def initialize_labels(self, Y):
y_nodes_flat = [y_val for y in Y for y_val in y.nodes]
y_links_flat = [y_val for y in Y for y_val in y.links]
self.prop_encoder_ = LabelEncoder().fit(y_nodes_flat)
self.link_encoder_ = LabelEncoder().fit(y_links_flat)
self.n_prop_states = len(self.prop_encoder_.classes_)
self.n_link_states = len(self.link_encoder_.classes_)
self.prop_cw_ = np.ones_like(self.prop_encoder_.classes_,
dtype=np.double)
self.link_cw_ = compute_class_weight(self.class_weight,
self.link_encoder_.classes_,
y_links_flat)
self.link_cw_ /= self.link_cw_.min()
logging.info('Setting node class weights {}'.format(", ".join(
"{}: {}".format(lbl, cw) for lbl, cw in zip(
self.prop_encoder_.classes_, self.prop_cw_))))
logging.info('Setting link class weights {}'.format(", ".join(
"{}: {}".format(lbl, cw) for lbl, cw in zip(
self.link_encoder_.classes_, self.link_cw_))))
def _round(self, prop_marg, link_marg, prop_unary=None, link_unary=None,
inverse_transform=True):
# ensure ties are broken according to unary scores
if prop_unary is not None:
prop_unary = prop_unary.copy()
prop_unary -= np.min(prop_unary)
prop_unary /= np.max(prop_unary) * np.max(prop_marg)
prop_marg[prop_marg > 1e-9] += prop_unary[prop_marg > 1e-9]
if link_unary is not None:
link_unary = link_unary.copy()
link_unary -= np.min(link_unary)
link_unary /= np.max(link_unary) * np.max(link_marg)
link_marg[link_marg > 1e-9] += link_unary[link_marg > 1e-9]
y_hat_props = np.argmax(prop_marg, axis=1)
y_hat_links = np.argmax(link_marg, axis=1)
if inverse_transform:
y_hat_props = self.prop_encoder_.inverse_transform(y_hat_props)
y_hat_links = self.link_encoder_.inverse_transform(y_hat_links)
return DocLabel(y_hat_props, y_hat_links)
def loss(self, y, y_hat):
if not isinstance(y_hat, DocLabel):
return self.continuous_loss(y, y_hat)
y_nodes = self.prop_encoder_.transform(y.nodes)
y_links = self.link_encoder_.transform(y.links)
node_loss = np.sum(self.prop_cw_[y_nodes] * (y.nodes != y_hat.nodes))
link_loss = np.sum(self.link_cw_[y_links] * (y.links != y_hat.links))
return node_loss + link_loss
def max_loss(self, y):
y_nodes = self.prop_encoder_.transform(y.nodes)
y_links = self.link_encoder_.transform(y.links)
return np.sum(self.prop_cw_[y_nodes]) + np.sum(self.link_cw_[y_links])
def continuous_loss(self, y, y_hat):
if isinstance(y_hat, DocLabel):
raise ValueError("continuous loss on discrete input")
if isinstance(y_hat[0], tuple):
y_hat = y_hat[0]
prop_marg, link_marg = y_hat
y_nodes = self.prop_encoder_.transform(y.nodes)
y_links = self.link_encoder_.transform(y.links)
prop_ix = np.indices(y.nodes.shape)
link_ix = np.indices(y.links.shape)
# relies on prop_marg and link_marg summing to 1 row-wise
prop_loss = np.sum(self.prop_cw_[y_nodes] *
(1 - prop_marg[prop_ix, y_nodes]))
link_loss = np.sum(self.link_cw_[y_links] *
(1 - link_marg[link_ix, y_links]))
loss = prop_loss + link_loss
return loss
def _marg_rounded(self, x, y):
y_node = y.nodes
y_link = y.links
Y_node = label_binarize(y_node, self.prop_encoder_.classes_)
Y_link = label_binarize(y_link, self.link_encoder_.classes_)
# XXX can this be avoided?
Y_node, Y_link = map(_binary_2d, (Y_node, Y_link))
src_type = Y_node[x.link_to_prop[:, 0]]
trg_type = Y_node[x.link_to_prop[:, 1]]
if self.compat_features:
pw = np.einsum('...j,...k,...l->...jkl',
src_type, trg_type, Y_link)
compat = np.tensordot(x.X_compat.T, pw, axes=[1, 0])
else:
# equivalent to compat_features == np.ones(n_links)
compat = np.einsum('ij,ik,il->jkl', src_type, trg_type, Y_link)
second_order = []
if self.coparents_ or self.grandparents_ or self.siblings_:
link = {(a, b): k for k, (a, b) in enumerate(x.link_to_prop)}
if self.coparents_:
second_order.extend(y_link[link[a, b]] & y_link[link[c, b]]
for a, b, c in x.second_order)
if self.grandparents_:
second_order.extend(y_link[link[a, b]] & y_link[link[b, c]]
for a, b, c in x.second_order)
if self.siblings_:
second_order.extend(y_link[link[b, a]] & y_link[link[b, c]]
for a, b, c in x.second_order)
second_order = np.array(second_order)
return Y_node, Y_link, compat, second_order
def _marg_fractional(self, x, y):
(prop_marg, link_marg), (compat_marg, second_order_marg) = y
if self.compat_features:
compat_marg = np.tensordot(x.X_compat.T, compat_marg, axes=[1, 0])
else:
compat_marg = compat_marg.sum(axis=0)
return prop_marg, link_marg, compat_marg, second_order_marg
def _inference(self, x, potentials, exact=False, relaxed=True,
return_energy=False, constraints=None,
eta=0.1, adapt=True, max_iter=5000,
verbose=False):
(prop_potentials,
link_potentials,
compat_potentials,
coparent_potentials,
grandparent_potentials,
sibling_potentials) = potentials
n_props, n_prop_classes = prop_potentials.shape
n_links, n_link_classes = link_potentials.shape
g = fg.PFactorGraph()
g.set_verbosity(verbose)
prop_vars = [g.create_multi_variable(n_prop_classes)
for _ in range(n_props)]
link_vars = [g.create_multi_variable(n_link_classes)
for _ in range(n_links)]
for var, scores in zip(prop_vars, prop_potentials):
for state, score in enumerate(scores):
var.set_log_potential(state, score)
for var, scores in zip(link_vars, link_potentials):
for state, score in enumerate(scores):
var.set_log_potential(state, score)
# compatibility trigram factors
compat_factors = []
link_vars_dict = {}
link_on, link_off = self.link_encoder_.transform([True, False])
# account for compat features
if self.compat_features:
assert compat_potentials.shape[0] == n_links
compats = compat_potentials
else:
compats = (compat_potentials for _ in range(n_links))
for (src, trg), link_v, compat in zip(x.link_to_prop,
link_vars,
compats):
src_v = prop_vars[src]
trg_v = prop_vars[trg]
compat_factors.append(g.create_factor_dense([src_v, trg_v, link_v],
compat.ravel()))
# keep track of binary link variables, for constraints.
            # we need .get_state() to get the underlying PBinaryVariable
link_vars_dict[src, trg] = link_v.get_state(link_on)
# second-order factors
coparent_factors = []
grandparent_factors = []
sibling_factors = []
for score, (a, b, c) in zip(coparent_potentials, x.second_order):
# a -> b <- c
vars = [link_vars_dict[a, b], link_vars_dict[c, b]]
coparent_factors.append(g.create_factor_pair(vars, score))
for score, (a, b, c) in zip(grandparent_potentials, x.second_order):
# a -> b -> c
vars = [link_vars_dict[a, b], link_vars_dict[b, c]]
grandparent_factors.append(g.create_factor_pair(vars, score))
for score, (a, b, c) in zip(sibling_potentials, x.second_order):
# a <- b -> c
vars = [link_vars_dict[b, a], link_vars_dict[b, c]]
sibling_factors.append(g.create_factor_pair(vars, score))
# domain-specific constraints
if constraints and 'cdcp' in constraints:
# antisymmetry: if a -> b, then b cannot -> a
for src in range(n_props):
for trg in range(src):
fwd_link_v = link_vars_dict[src, trg]
rev_link_v = link_vars_dict[trg, src]
g.create_factor_logic('ATMOSTONE',
[fwd_link_v, rev_link_v],
[False, False])
# transitivity.
# forall a != b != c: a->b and b->c imply a->c
for a, b, c in permutations(range(n_props), 3):
ab_link_v = link_vars_dict[a, b]
bc_link_v = link_vars_dict[b, c]
ac_link_v = link_vars_dict[a, c]
g.create_factor_logic('IMPLY',
[ab_link_v, bc_link_v, ac_link_v],
[False, False, False])
# standard model:
if 'strict' in constraints:
for src, trg in x.link_to_prop:
src_v = prop_vars[src]
trg_v = prop_vars[trg]
for types in CDCP_ILLEGAL_LINKS:
src_ix, trg_ix = self.prop_encoder_.transform(types)
g.create_factor_logic('IMPLY',
[src_v.get_state(src_ix),
trg_v.get_state(trg_ix),
link_vars_dict[src, trg]],
[False, False, True])
elif constraints and 'ukp' in constraints:
# Tree constraints using AD3 MST factor for each paragraph.
# First, identify paragraphs
prop_para = np.array(x.prop_para)
link_para = prop_para[x.link_to_prop[:, 0]]
tree_factors = []
for para_ix in np.unique(link_para):
props = np.where(prop_para == para_ix)[0]
offset = props.min()
para_vars = []
para_arcs = [] # call them arcs, semantics differ from links
# add a new head node pointing to every possible variable
for relative_ix, prop_ix in enumerate(props, 1):
para_vars.append(g.create_binary_variable())
para_arcs.append((0, relative_ix))
# add an MST arc for each link
for src, trg in x.link_to_prop[link_para == para_ix]:
relative_src = src - offset + 1
relative_trg = trg - offset + 1
para_vars.append(link_vars_dict[src, trg])
# MST arcs have opposite direction from argument links!
# because each prop can have multiple supports but not
# the other way around
para_arcs.append((relative_trg, relative_src))
tree = fg.PFactorTree()
g.declare_factor(tree, para_vars, True)
tree.initialize(1 + len(props), para_arcs)
tree_factors.append(tree)
if 'strict' in constraints:
# further domain-specific constraints
mclaim_ix, claim_ix, premise_ix = self.prop_encoder_.transform(
['MajorClaim', 'Claim', 'Premise'])
# a -> b implies a = 'premise'
for (src, trg), link_v in zip(x.link_to_prop, link_vars):
src_v = prop_vars[src]
g.create_factor_logic('IMPLY',
[link_v.get_state(link_on),
src_v.get_state(premise_ix)],
[False, False])
g.fix_multi_variables_without_factors()
g.set_eta_ad3(eta)
g.adapt_eta_ad3(adapt)
g.set_max_iterations_ad3(max_iter)
if exact:
val, posteriors, additionals, status = g.solve_exact_map_ad3()
else:
val, posteriors, additionals, status = g.solve_lp_map_ad3()
status = ["integer", "fractional", "infeasible", "not solved"][status]
prop_marg = posteriors[:n_props * n_prop_classes]
prop_marg = np.array(prop_marg).reshape(n_props, -1)
link_marg = posteriors[n_props * n_prop_classes:]
# remaining posteriors are for artificial root nodes for MST factors
link_marg = link_marg[:n_links * n_link_classes]
link_marg = np.array(link_marg).reshape(n_links, -1)
n_compat = n_links * n_link_classes * n_prop_classes ** 2
compat_marg = additionals[:n_compat]
compat_marg = np.array(compat_marg).reshape((n_links,
n_prop_classes,
n_prop_classes,
n_link_classes))
second_ordermarg = np.array(additionals[n_compat:])
posteriors = (prop_marg, link_marg)
additionals = (compat_marg, second_ordermarg)
if relaxed:
y_hat = posteriors, additionals
else:
y_hat = self._round(prop_marg, link_marg, prop_potentials,
link_potentials)
if return_energy:
return y_hat, status, -val
else:
return y_hat, status
def _score(self, Y_true, Y_pred):
acc = sum(1 for y_true, y_pred in zip(Y_true, Y_pred)
if np.all(y_true.links == y_pred.links) and
np.all(y_true.nodes == y_pred.nodes))
acc /= len(Y_true)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
link_macro, link_micro = arg_f1_scores(
(y.links for y in Y_true),
(y.links for y in Y_pred),
average='binary',
pos_label=True,
labels=self.link_encoder_.classes_
)
node_macro, node_micro = arg_f1_scores(
(y.nodes for y in Y_true),
(y.nodes for y in Y_pred),
average='macro',
labels=self.prop_encoder_.classes_
)
return link_macro, link_micro, node_macro, node_micro, acc
class ArgumentGraphCRF(BaseArgumentMixin, StructuredModel):
def __init__(self, class_weight=None, link_node_weight_ratio=1,
exact=False, constraints=None, compat_features=False,
coparents=False, grandparents=False, siblings=False):
self.class_weight = class_weight
self.link_node_weight_ratio = link_node_weight_ratio
self.exact = exact
self.constraints = constraints
self.compat_features = compat_features
self.coparents = coparents
self.grandparents = grandparents
self.siblings = siblings
self.n_second_order_factors_ = coparents + grandparents + siblings
self.n_prop_states = None
self.n_link_states = None
self.n_prop_features = None
self.n_link_features = None
self.n_second_order_features_ = None
self.n_compat_features_ = None
self.inference_calls = 0
super(ArgumentGraphCRF, self).__init__()
def initialize(self, X, Y):
# each x in X is a vectorized doc exposing sp.csr x.X_prop, x.X_link,
# and maybe x.X_compat and x.X_sec_ord
# each y in Y exposes lists y.nodes, y.links
x = X[0]
self.n_prop_features = x.X_prop.shape[1]
self.n_link_features = x.X_link.shape[1]
if self.compat_features:
self.n_compat_features_ = x.X_compat.shape[1]
if self.n_second_order_factors_:
self.n_second_order_features_ = x.X_sec_ord.shape[1]
else:
self.n_second_order_features_ = 0
self.initialize_labels(Y)
self._set_size_joint_feature()
self.coparents_ = self.coparents
self.grandparents_ = self.grandparents
self.siblings_ = self.siblings
def _set_size_joint_feature(self): # assumes no second order
compat_size = self.n_prop_states ** 2 * self.n_link_states
if self.compat_features:
compat_size *= self.n_compat_features_
total_n_second_order = (self.n_second_order_features_ *
self.n_second_order_factors_)
self.size_joint_feature = (self.n_prop_features * self.n_prop_states +
self.n_link_features * self.n_link_states +
compat_size + total_n_second_order)
logging.info("Joint feature size: {}".format(self.size_joint_feature))
def joint_feature(self, x, y):
if isinstance(y, DocLabel):
Y_prop, Y_link, compat, second_order = self._marg_rounded(x, y)
else:
Y_prop, Y_link, compat, second_order = self._marg_fractional(x, y)
prop_acc = safe_sparse_dot(Y_prop.T, x.X_prop) # node_cls * node_feats
link_acc = safe_sparse_dot(Y_link.T, x.X_link) # link_cls * link_feats
f_sec_ord = []
if len(second_order):
second_order = second_order.reshape(-1, len(x.second_order))
if self.coparents:
f_sec_ord.append(safe_sparse_dot(second_order[0], x.X_sec_ord))
second_order = second_order[1:]
if self.grandparents:
f_sec_ord.append(safe_sparse_dot(second_order[0], x.X_sec_ord))
second_order = second_order[1:]
if self.siblings:
f_sec_ord.append(safe_sparse_dot(second_order[0], x.X_sec_ord))
elif self.n_second_order_factors_:
# document has no second order factors so the joint feature
# must be filled with zeros manually
f_sec_ord = [np.zeros(self.n_second_order_features_)
for _ in range(self.n_second_order_factors_)]
jf = np.concatenate([prop_acc.ravel(), link_acc.ravel(),
compat.ravel()] + f_sec_ord)
return jf
# basically reversing the joint feature
def _get_potentials(self, x, w):
# check sizes?
n_node_coefs = self.n_prop_states * self.n_prop_features
n_link_coefs = self.n_link_states * self.n_link_features
n_compat_coefs = self.n_prop_states ** 2 * self.n_link_states
if self.compat_features:
n_compat_coefs *= self.n_compat_features_
assert w.size == (n_node_coefs + n_link_coefs + n_compat_coefs +
self.n_second_order_features_ *
self.n_second_order_factors_)
w_node = w[:n_node_coefs]
w_node = w_node.reshape(self.n_prop_states, self.n_prop_features)
w_link = w[n_node_coefs:n_node_coefs + n_link_coefs]
w_link = w_link.reshape(self.n_link_states, self.n_link_features)
# for readability, consume w. This is not inplace, don't worry.
w = w[n_node_coefs + n_link_coefs:]
w_compat = w[:n_compat_coefs]
if self.compat_features:
w_compat = w_compat.reshape((self.n_compat_features_, -1))
w_compat = np.dot(x.X_compat, w_compat)
compat_potentials = w_compat.reshape((-1,
self.n_prop_states,
self.n_prop_states,
self.n_link_states))
else:
compat_potentials = w_compat.reshape(self.n_prop_states,
self.n_prop_states,
self.n_link_states)
w = w[n_compat_coefs:]
coparent_potentials = grandparent_potentials = sibling_potentials = []
if self.coparents:
w_coparent = w[:self.n_second_order_features_]
coparent_potentials = safe_sparse_dot(x.X_sec_ord, w_coparent)
w = w[self.n_second_order_features_:]
if self.grandparents:
w_grandparent = w[:self.n_second_order_features_]
grandparent_potentials = safe_sparse_dot(x.X_sec_ord,
w_grandparent)
w = w[self.n_second_order_features_:]
if self.siblings:
w_sibling = w[:self.n_second_order_features_]
sibling_potentials = safe_sparse_dot(x.X_sec_ord, w_sibling)
prop_potentials = safe_sparse_dot(x.X_prop, w_node.T)
link_potentials = safe_sparse_dot(x.X_link, w_link.T)
return (prop_potentials, link_potentials, compat_potentials,
coparent_potentials, grandparent_potentials,
sibling_potentials)
def inference(self, x, w, relaxed=False, return_energy=False):
self.inference_calls += 1
potentials = self._get_potentials(x, w)
out = self._inference(x, potentials, exact=self.exact,
relaxed=relaxed, return_energy=return_energy,
constraints=self.constraints)
if return_energy:
return out[0], out[-1]
else:
return out[0]
def loss_augmented_inference(self, x, y, w, relaxed=None):
self.inference_calls += 1
potentials = self._get_potentials(x, w)
(prop_potentials,
link_potentials,
compat_potentials,
coparent_potentials,
grandparent_potentials,
sibling_potentials) = potentials
y_prop = self.prop_encoder_.transform(y.nodes)
y_link = self.link_encoder_.transform(y.links)
loss_augment_unaries(prop_potentials, y_prop, self.prop_cw_)
loss_augment_unaries(link_potentials, y_link, self.link_cw_)
potentials = (prop_potentials,
link_potentials,
compat_potentials,
coparent_potentials,
grandparent_potentials,
sibling_potentials)
out = self._inference(x, potentials, exact=self.exact,
relaxed=relaxed, constraints=self.constraints)
return out[0]
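# Rough usage sketch (editorial addition; the learner choice is an assumption,
# not something stated in this file): ArgumentGraphCRF follows the pystruct
# StructuredModel interface, so it is typically paired with one of pystruct's
# SSVM learners. Each x must expose X_prop, X_link, link_to_prop (and, when
# enabled, X_compat / X_sec_ord / second_order); each y is a DocLabel.
#
#     >>> from pystruct.learners import FrankWolfeSSVM
#     >>> crf = ArgumentGraphCRF(class_weight='balanced', constraints='cdcp')
#     >>> learner = FrankWolfeSSVM(model=crf, C=0.1, max_iter=100)
#     >>> learner.fit(X_train, Y_train)   # X_train / Y_train are assumed
#     >>> Y_pred = learner.predict(X_test)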
| bsd-3-clause |
heli522/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
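# Editorial note: with weights=[0.1, 0.25] the remaining probability mass
# (0.65) is assigned to the last class, which is why the assertions above
# expect 10 / 25 / 65 samples for classes 0, 1 and 2.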
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                                  err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
tawsifkhan/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
henrykironde/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
paalge/scikit-image | doc/examples/transform/plot_radon_transform.py | 3 | 8550 | """
===============
Radon transform
===============
In computed tomography, the tomography reconstruction problem is to obtain
a tomographic slice image from a set of projections [1]_. A projection is
formed by drawing a set of parallel rays through the 2D object of interest,
assigning the integral of the object's contrast along each ray to a single
pixel in the projection. A single projection of a 2D object is one dimensional.
To enable computed tomography reconstruction of the object, several projections
must be acquired, each of them corresponding to a different angle between the
rays with respect to the object. A collection of projections at several angles
is called a sinogram, which is a linear transform of the original image.
The inverse Radon transform is used in computed tomography to reconstruct
a 2D image from the measured projections (the sinogram). A practical, exact
implementation of the inverse Radon transform does not exist, but there are
several good approximate algorithms available.
As the inverse Radon transform reconstructs the object from a set of
projections, the (forward) Radon transform can be used to simulate a
tomography experiment.
This script performs the Radon transform to simulate a tomography experiment
and reconstructs the input image based on the resulting sinogram formed by
the simulation. Two methods for performing the inverse Radon transform
and reconstructing the original image are compared: The Filtered Back
Projection (FBP) and the Simultaneous Algebraic Reconstruction
Technique (SART).
For further information on tomographic reconstruction, see
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
IEEE Press 1988. http://www.slaney.org/pct/pct-toc.html
.. [2] Wikipedia, Radon transform,
http://en.wikipedia.org/wiki/Radon_transform#Relationship_with_the_Fourier_transform
.. [3] S Kaczmarz, "Angenaeherte Aufloesung von Systemen linearer
Gleichungen", Bulletin International de l'Academie Polonaise
des Sciences et des Lettres, 35 pp 355--357 (1937)
.. [4] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction
technique (SART): a superior implementation of the ART algorithm",
Ultrasonic Imaging 6 pp 81--94 (1984)
The forward transform
=====================
As our original image, we will use the Shepp-Logan phantom. When calculating
the Radon transform, we need to decide how many projection angles we wish
to use. As a rule of thumb, the number of projections should be about the
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its *sinogram*:
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale
image = imread(data_dir + "/phantom.png", as_grey=True)
image = rescale(image, scale=0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.tight_layout()
plt.show()
######################################################################
#
# Reconstruction with the Filtered Back Projection (FBP)
# ======================================================
#
# The mathematical foundation of the filtered back projection is the Fourier
# slice theorem [2]_. It uses Fourier transform of the projection and
# interpolation in Fourier space to obtain the 2D Fourier transform of the
# image, which is then inverted to form the reconstructed image. The filtered
# back projection is among the fastest methods of performing the inverse
# Radon transform. The only tunable parameter for the FBP is the filter,
# which is applied to the Fourier transformed projections. It may be used to
# suppress high frequency noise in the reconstruction. ``skimage`` provides a
# few different options for the filter.
from skimage.transform import iradon
reconstruction_fbp = iradon(sinogram, theta=theta, circle=True)
error = reconstruction_fbp - image
print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
imkwargs = dict(vmin=-0.2, vmax=0.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5),
sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
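# Illustrative variation (not part of the original example): the FBP filter can
# be selected via the ``filter`` argument (renamed ``filter_name`` in newer
# scikit-image releases); a Hann window damps high-frequency noise at the cost
# of some resolution.
reconstruction_fbp_hann = iradon(sinogram, theta=theta, circle=True,
                                 filter='hann')
error_hann = reconstruction_fbp_hann - image
print('FBP (Hann filter) rms reconstruction error: %.3g'
      % np.sqrt(np.mean(error_hann**2)))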
######################################################################
#
# Reconstruction with the Simultaneous Algebraic Reconstruction Technique
# =======================================================================
#
# Algebraic reconstruction techniques for tomography are based on a
# straightforward idea: for a pixelated image the value of a single ray in a
# particular projection is simply a sum of all the pixels the ray passes
# through on its way through the object. This is a way of expressing the
# forward Radon transform. The inverse Radon transform can then be formulated
# as a (large) set of linear equations. As each ray passes through a small
# fraction of the pixels in the image, this set of equations is sparse,
# allowing iterative solvers for sparse linear systems to tackle the system
# of equations. One iterative method has been particularly popular, namely
# Kaczmarz' method [3]_, which has the property that the solution will
# approach a least-squares solution of the equation set.
#
# The combination of the formulation of the reconstruction problem as a set
# of linear equations and an iterative solver makes algebraic techniques
# relatively flexible, hence some forms of prior knowledge can be
# incorporated with relative ease.
#
# ``skimage`` provides one of the more popular variations of the algebraic
# reconstruction techniques: the Simultaneous Algebraic Reconstruction
# Technique (SART) [1]_ [4]_. It uses Kaczmarz' method [3]_ as the iterative
# solver. A good reconstruction is normally obtained in a single iteration,
# making the method computationally effective. Running one or more extra
# iterations will normally improve the reconstruction of sharp, high
# frequency features and reduce the mean squared error at the expense of
# increased high frequency noise (the user will need to decide on what number
# of iterations is best suited to the problem at hand. The implementation in
# ``skimage`` allows prior information of the form of a lower and upper
# threshold on the reconstructed values to be supplied to the reconstruction.
from skimage.transform import iradon_sart
reconstruction_sart = iradon_sart(sinogram, theta=theta)
error = reconstruction_sart - image
print('SART (1 iteration) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
fig, axes = plt.subplots(2, 2, figsize=(8, 8.5), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].set_title("Reconstruction\nSART")
ax[0].imshow(reconstruction_sart, cmap=plt.cm.Greys_r)
ax[1].set_title("Reconstruction error\nSART")
ax[1].imshow(reconstruction_sart - image, cmap=plt.cm.Greys_r, **imkwargs)
# Run a second iteration of SART by supplying the reconstruction
# from the first iteration as an initial estimate
reconstruction_sart2 = iradon_sart(sinogram, theta=theta,
image=reconstruction_sart)
error = reconstruction_sart2 - image
print('SART (2 iterations) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
ax[2].set_title("Reconstruction\nSART, 2 iterations")
ax[2].imshow(reconstruction_sart2, cmap=plt.cm.Greys_r)
ax[3].set_title("Reconstruction error\nSART, 2 iterations")
ax[3].imshow(reconstruction_sart2 - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
| bsd-3-clause |
NUAAXXY/globOpt | evaluation/packages/polarPlot.py | 2 | 8162 | from pylab import *
import argparse
import math
from matplotlib import cbook
import packages.primitive as primitive
import scipy.signal
import packages.relationGraph as relgraph
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, \
ParasiteAxesAuxTrans
def generatePolarPlot(anglesInDegrees, filename, N=180):
    raise NotImplementedError('generatePolarPlot')
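# Degree/minute/second tick formatter used by the curvilinear grid code below;
# appears to be adapted from matplotlib's
# mpl_toolkits.axisartist.angle_helper.FormatterDMS (assumption).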
class mFormatterDMS(object):
deg_mark = ""
min_mark = "^{\prime}"
sec_mark = "^{\prime\prime}"
fmt_d = "$%d"+deg_mark+"$"
fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
    # %s for sign
fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\mkern-4mu"+min_mark+"%s$"
fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
fmt_s_partial = "%02d"+sec_mark+"$"
fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"
def _get_number_fraction(self, factor):
## check for fractional numbers
number_fraction = None
# check for 60
for threshold in [1, 60, 3600]:
if factor <= threshold:
break
d = factor // threshold
int_log_d = int(floor(math.log10(d)))
if 10**int_log_d == d and d!=1:
number_fraction = int_log_d
factor = factor // 10**int_log_d
return factor, number_fraction
return factor, number_fraction
def __call__(self, direction, factor, values):
if len(values) == 0:
return []
#ss = [[-1, 1][v>0] for v in values] #not py24 compliant
values = np.asarray(values)
ss = np.where(values>0, 1, -1)
sign_map = {(-1, True):"-"}
signs = [sign_map.get((s, v!=0), "") for s, v in zip(ss, values)]
factor, number_fraction = self._get_number_fraction(factor)
values = np.abs(values)
if number_fraction is not None:
values, frac_part = divmod(values, 10**number_fraction)
frac_fmt = "%%0%dd" % (number_fraction,)
frac_str = [frac_fmt % (f1,) for f1 in frac_part]
if factor == 1:
if number_fraction is None:
return [self.fmt_d % (s*int(v),) for (s, v) in zip(ss, values)]
else:
return [self.fmt_ds % (s*int(v), f1) for (s, v, f1) in \
zip(ss, values, frac_str)]
elif factor == 60:
deg_part, min_part = divmod(values, 60)
if number_fraction is None:
return [self.fmt_d_m % (s1, d1, m1) \
for s1, d1, m1 in zip(signs, deg_part, min_part)]
else:
return [self.fmt_d_ms % (s, d1, m1, f1) \
for s, d1, m1, f1 in zip(signs, deg_part, min_part, frac_str)]
elif factor == 3600:
if ss[-1] == -1:
inverse_order = True
values = values[::-1]
                signs = signs[::-1]
else:
inverse_order = False
l_hm_old = ""
r = []
deg_part, min_part_ = divmod(values, 3600)
min_part, sec_part = divmod(min_part_, 60)
if number_fraction is None:
sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part]
else:
sec_str = [self.fmt_ss_partial % (s1, f1) for s1, f1 in zip(sec_part, frac_str)]
for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str):
l_hm = self.fmt_d_m_partial % (s, d1, m1)
if l_hm != l_hm_old:
l_hm_old = l_hm
l = l_hm + s1 #l_s
else:
l = "$"+s1 #l_s
r.append(l)
if inverse_order:
return r[::-1]
else:
return r
else: # factor > 3600.
return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
#ax = axes([0.025,0.025,0.95,0.95], polar=True)
preferred_angles = [90]
best_num = 15
theta = np.arange(0.0, np.pi, np.pi/N)
radii = np.zeros(N)
for angle in anglesInDegrees:
if (angle != -1):
radii[int(N/360.*angle)] += 1
"""
polar projection, but in a rectangular box.
"""
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(3)
# Find a grid values appropriate for the coordinate (degree,
# minute, second).
deg_mark = "^{\circ}"
min_mark = "^{\prime}"
sec_mark = "^{\prime\prime}"
tick_formatter1 = mFormatterDMS()
# And also uses an appropriate formatter. Note that,the
# acceptable Locator and Formatter class is a bit different than
# that of mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
#ax1 = axes([0.025,0.025,0.95,0.95], grid_helper=grid_helper)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
ax1.axis["left"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
ax1.axis["bottom"].major_ticklabels.set_visible(False)
fig.add_subplot(ax1)
limitvalue=np.max(radii)+1
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
# Anthing you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 180]), 50),
intp(np.array([limitvalue, limitvalue]), 50), color='k')
ax2.bar (360.*theta/np.pi, radii, 0.01*np.ones(N), color='k', linewidth=3)
ax1.set_aspect(1.)
ax1.set_xlim(limitvalue, -limitvalue)
ax1.set_ylim(0, limitvalue)
ax1.grid(True)
savefig(filename[:-4]+'.svg', format="svg")
#show()
| apache-2.0 |
frank-tancf/scikit-learn | sklearn/semi_supervised/label_propagation.py | 14 | 15965 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
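# Illustrative usage sketch (not part of the original module): the docstring
# example above uses the default dense RBF graph; for larger datasets the
# sparse 'knn' kernel is usually preferable. The n_neighbors value is an
# arbitrary choice for illustration, with iris/labels as in the example above.
#
#   from sklearn.semi_supervised import LabelSpreading
#   model = LabelSpreading(kernel='knn', n_neighbors=10, alpha=0.2)
#   model.fit(iris.data, labels)
#   predicted = model.transduction_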
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
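            # For the 'knn' kernel, _get_kernel returns neighbor *indices*, so
            # each query point's distribution is the (unnormalized) sum of its
            # neighbors' label distributions; normalization happens below.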
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
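# Editor's note, a sketch of the underlying math rather than original library
# code: with normed=True, graph_laplacian returns I - D^{-1/2} W D^{-1/2};
# negating it and zeroing the diagonal keeps the off-diagonal part of the
# normalized affinity S = D^{-1/2} W D^{-1/2} that drives the label-spreading
# iteration.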
| bsd-3-clause |
jstraub/bnp | python/dirHdpGenerative.py | 1 | 15131 |
import numpy as np
import libbnp as bnp
import scipy.special as scisp
import scipy.io as sio
import matplotlib.pyplot as plt
import pickle
def stickBreaking(v):
N = v.size
prop = np.zeros(N+1)
for i in range(0,N):
if i == N-1:
prop[i] = 1.0
else:
prop[i]=v[i]
for j in range(0,i):
prop[i] *= (1.0-v[j])
return prop
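# Illustrative usage sketch (editor's addition, not part of the original code):
# the truncated stick-breaking proportions are non-negative and sum to one.
# v = np.random.beta(1.0, 5.0, size=10)  # the 5.0 concentration is arbitrary
# prop = stickBreaking(v)
# assert np.all(prop >= 0.0) and np.isclose(prop.sum(), 1.0)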
def logCat(x, pi):
logP = 0.0
if x < pi.size:
logP += np.log(pi[x])
for i in xrange(0,pi.size):
if i != x:
logP += np.log(1.0-pi[i])
return logP
def logBeta(x, alpha, beta):
if type(x) is np.ndarray:
N=x.size
# at these x only the normalization term -log(B(alpha,beta)) contributes,
# because 0^0 = 1
issue=(x<1.0e-15)|(1.0-x<1.0e-15)
# print('\t\talpha={}; beta={}'.format(alpha, beta))
# print('\t\tbetaln={}'.format(-N*scisp.betaln(alpha,beta)))
# print('\t\talphaterm={}'.format((alpha-1.0)*np.sum(np.log(x))))
# print('\t\tbetaterm={}'.format((beta-1.0)*np.sum(np.log(1.0-x))))
# print('\t\tbetaterm={}'.format(np.sum(np.log(1.0-x[~issue]))))
# print('\t\tbetaterm={}'.format(np.log(1.0-x[~issue])))
# print('\t\tbetaterm={}'.format(1.0-x))
# print('\t\tbetaterm={}'.format(x))
# CDF: - do not use!
#print('\t\tterm={}'.format(np.log(scisp.betainc(alpha,beta,x))))
#return np.sum(np.log(scisp.betainc(alpha,beta,x)))
if alpha == 1.0:
return -N*scisp.betaln(alpha,beta) \
+(beta-1.0)*np.sum(np.log(1.0-x[~issue]))
elif beta == 1.0:
return -N*scisp.betaln(alpha,beta) \
+(alpha-1.0)*np.sum(np.log(x[~issue]))
else:
return -N*scisp.betaln(alpha,beta) \
+(alpha-1.0)*np.sum(np.log(x[~issue])) \
+(beta-1.0)*np.sum(np.log(1.0-x[~issue]))
else:
if (x<1.0e-15)|(1.0-x<1.0e-15):
return -scisp.betaln(alpha,beta)
else:
return -scisp.betaln(alpha,beta) \
+(alpha-1.0)*np.log(x) \
+(beta-1.0)*np.log(1.0-x)
def logDir(x, alpha):
logP = 0.0
if alpha.size == x.size:
logP = scisp.gammaln(np.sum(alpha))
for i in xrange(0, alpha.size):
logP += -scisp.gammaln(alpha[i]) + (alpha[i]-1.0)*np.log(x[i])
return logP
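# Sanity-check sketch (editor's addition; assumes scipy.stats is available):
# these helpers agree with the corresponding scipy log-densities, e.g.
# from scipy import stats
# assert np.isclose(logBeta(0.3, 2.0, 5.0), stats.beta.logpdf(0.3, 2.0, 5.0))
# x = np.array([0.2, 0.3, 0.5]); alpha = np.array([1.0, 2.0, 3.0])
# assert np.isclose(logDir(x, alpha), stats.dirichlet.logpdf(x, alpha))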
class HDP_base:
# # parameters
# c = []
# z = []
# beta = np.zeros(1)
# v = np.zeros(1)
# sigV = np.zeros(1)
# pi = []
# sigPi = []
# # data
# x_tr = []
# x_ho = []
# # results
# perp = np.zeros(1)
loaded = dict()
def __init__(s, K=None,T=None,Nw=None,omega=None,alpha=None,base=None, pathToModel=None):
s.scalars = ['K','T','Nw','omega','alpha','D_tr','D_te']
s.matrices = ['c','pi','sigPi','base','beta','sigV','v','perp']
s.listMatrices=['z','x_tr','x_te','logP_w']
s.shape=dict()
# s.states=[]
# s.states.expand(s.scalars)
# s.states.expand(s.matrices)
# s.states.expand(s.listMatrices)
s.state=dict()
if pathToModel is None:
s.state['K'] = K
s.state['T'] = T
s.state['Nw'] = Nw
# hyper parameters
s.state['omega'] = omega
s.state['alpha'] = alpha
s.state['base'] = np.zeros(base.rowDim())
base.asRow(s.state['base'])
print('in HDP_base setting K={}; T={}; Nw={}; omega={}; alpha={};'.format(s.state['K'],s.state['T'],s.state['Nw'],s.state['omega'],s.state['alpha']))
else:
s.load(pathToModel)
def save(s,path):
pickle.dump(s.state,open(path,'w'))
# sio.savemat(path,s.state,oned_as='row')
def load(s,path):
try:
# s.loaded=sio.loadmat(path)
s.state=pickle.load(open(path,'r'))
print('Found model under {0}'.format(path))
except Exception, err:
print('Did not find model under {0}'.format(path))
return False
# print('--- loading state from mat file at {}'.format(path))
# if HDP_base.__parseLoad(s,s.loaded):
# print('--- loaded state successfully!')
# else:
# print('--- error while loading state!')
return True
def __parseLoad(s,mat):
print('mat.keys: {}'.format(mat.keys()))
for scalar in s.scalars:
s.state[scalar]=mat[scalar][0][0]
print('loaded {}\t {}'.format(scalar,s.state[scalar]))
for matrix in s.matrices:
s.state[matrix]=mat[matrix]
if s.state[matrix].shape[1] == 1: # savemat/loadmat puts vectors always as column vectors
s.state[matrix] = s.state[matrix].ravel()
print('loaded {}\t {}'.format(matrix,s.state[matrix].shape))
# have to be added to the appropriate matrices or listMatrices
# s.state['x_tr'] = mat['x_tr']
# print('loaded x_tr\t {}'.format(s.state['x_tr'].shape))
# s.state['x_ho'] = mat['x_ho']
for listMatrix in s.listMatrices:
s.state[listMatrix] = []
D=s.state['D_tr'] #mat[listMatrix].size
if listMatrix == 'x_te':
D=s.state['D_te']
print('{} D={}'.format(listMatrix,D))
if D==1:
s.state[listMatrix].append(mat[listMatrix].ravel())
#print('loaded {}_{}\t {}'.format(listMatrix,d,s.state[listMatrix][0].shape))
else:
for d in range(0,D):
s.state[listMatrix].append(mat[listMatrix][d][0])
if len(s.state[listMatrix][d].shape) >1:
if s.state[listMatrix][d].shape[1] == 1: # savemat/loadmat puts vectors always as column vectors
s.state[listMatrix][d] = s.state[listMatrix][d].ravel()
#print('loaded {}_{}\t {}'.format(listMatrix,d,s.state[listMatrix][d].shape))
print('loaded {}\t {}'.format(listMatrix,D))
return True
def stateEquals(s,hdp):
print('--- checking whether state of two hdps is equal')
for key, val in s.state.iteritems():
print('checking {}'.format(key))
if key in s.scalars:
if hdp.state[key] != val:
print('keys {} differ'.format(key))
return False
elif key in s.matrices:
if np.any(hdp.state[key] != val):
print('keys {} differ'.format(key))
print('{}\nvs\n{}'.format(val,hdp.state[key]))
return False
elif key in s.listMatrices:
D=len(val)
for d in range(0,D):
if np.any(hdp.state[key][d] != val[d]):
print('keys {} at d={} differ'.format(key,d))
print('{}\nvs\n{}'.format(val[d],hdp.state[key][d]))
return False
print('--- HDP states are equal!')
return True
def loadHDPSample(s, x_tr, x_te, hdp):
if isinstance(hdp,bnp.HDP_var_Dir) or isinstance(hdp,bnp.HDP_var_NIW) or isinstance(hdp,bnp.HDP_var_ss):
print("---------------------- obtaining results -------------------------");
s.state['x_tr'] = x_tr
s.state['x_te'] = x_te
#print('{}'.format(s.state['x_tr']))
D_tr=s.state['D_tr']=len(s.state['x_tr'])
D_te=s.state['D_te']=len(s.state['x_te'])
print('D_tr={}; D_te={};'.format(s.state['D_tr'],s.state['D_te']))
s.state['sigV'] = np.zeros(s.state['K']+1,dtype=np.double)
s.state['v'] = np.zeros(s.state['K'],dtype=np.double)
hdp.getCorpTopicProportions(s.state['v'],s.state['sigV'])
print('gotCorpTopicProportions {} {}'.format(s.state['v'].shape,s.state['sigV'].shape))
s.state['perp'] = np.zeros(D_tr)
hdp.getPerplexity(s.state['perp'])
print('Perplexity of iterations: {}'.format(s.state['perp']))
betaCols = hdp.getTopicsDescriptionLength()
s.state['beta']= np.zeros((s.state['K'],betaCols),dtype=np.double)
hdp.getCorpTopics(s.state['beta'])
print('beta={}'.format(s.state['beta'].shape))
print('beta={}'.format(s.state['beta']))
s.state['sigPi']=np.zeros((D_tr,s.state['T']+1),dtype=np.double)
s.state['pi']=np.zeros((D_tr,s.state['T']),dtype=np.double)
s.state['c']=np.zeros((D_tr,s.state['T']),dtype=np.uint32)
if hdp.getDocTopics(s.state['pi'],s.state['sigPi'],s.state['c']):
print('pi: {}'.format(s.state['pi'].shape))
#print('pi: {}'.format(s.state['pi']))
print('sigPi: {}'.format(s.state['sigPi'].shape))
#print('sigPi: {}'.format(s.state['sigPi']))
else:
print('error while loading pi, sigPi and c from C++ model')
s.state['logP_w'] = [] #np.zeros((D_tr,s.state['Nw']),dtype=np.double)
s.state['z']=[] # word indices to doc topics
for d in range(0,D_tr):
N_d = s.state['x_tr'][d].size
#print('N_d={}'.format(N_d))
# print('getting {}'.format(d))
# hdp.getDocTopics(s.state['pi'][d,:],s.state['sigPi'][d,:],s.state['c'][d,:],d)
#print('c({0}): {1}'.format(d,s.state['c'][d]))
s.state['z'].append(np.zeros(N_d,dtype=np.uint32))
hdp.getWordTopics(s.state['z'][d],d)
s.state['logP_w'].append(np.zeros(N_d,dtype=np.double))
hdp.getWordDistr(s.state['logP_w'][d],d)
else:
print('Error loading hdp of type {}'.format(type(hdp)))
def checkSticks(s):
print('--------------------- Checking Stick pieces -----------------')
print('sigV = {0}; {1}'.format(s.state['sigV'],np.sum(s.state['sigV'])))
D=len(s.state['x_tr'])
for d in range(0,D):
print('sigPi = {0}; {1}'.format(s.state['sigPi'][d],np.sum(s.state['sigPi'][d])))
# KL divergence
def klD(s,logP,logQ):
p = np.exp(logP)
if len(logP.shape) > 1:
kl = np.zeros(p.shape[0])
for i in range(0,logP.shape[0]):
kl[i] = np.sum(p[i,:]*(logP[i,:]-logQ[i,:]))
return kl
else:
return np.sum(p*(logP-logQ))
# symmetrised divergence
def symKL(s,logP,logQ):
return np.sum((logP-logQ)*(np.exp(logP)-np.exp(logQ)))
def symKLImg(s):
D_tr = s.state['D_tr']
K = s.state['K']
T = s.state['T']
# create image for topic
symKLd = np.zeros((D_tr,D_tr))
logP = s.state['logP_w']
p = np.exp(logP)
for di in range(0,D_tr):
for dj in range(0,D_tr):
symKLd[di,dj] = np.sum((logP[di,:]-logP[dj,:])*(p[di,:]-p[dj,:]))
#s.symKL(s.state['logP_w'][di],s.state['logP_w'][dj])
return symKLd
# Jensen-Shannon divergence
def jsD(s,logP,logQ):
p=np.exp(logP)
q=np.exp(logQ)
logM=-np.log(2)+np.log(p + q)
return 0.5*np.sum(p*(logP-logM))+0.5*np.sum(q*(logQ-logM))
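# Property sketch (editor's addition): the Jensen-Shannon divergence of a
# distribution with itself is exactly zero, e.g. with
# logP = np.log(np.ones(4) / 4.0) the call s.jsD(logP, logP) returns 0.0.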
def jsDImg(s):
D_tr = s.state['D_tr']
K = s.state['K']
T = s.state['T']
# create image for topic
jsd = np.zeros((D_tr,D_tr))
for di in range(0,D_tr):
for dj in range(0,D_tr):
jsd[di,dj] = s.jsD(s.state['logP_w'][di],s.state['logP_w'][dj])
return jsd
def docTopicsImg(s):
D_tr = s.state['D_tr']
K = s.state['K']
T = s.state['T']
# create image for topic
vT = np.zeros((K,D_tr))
for d in range(0,D_tr):
for t in range(0,T):
#print('{0} {1} c.shape={2}'.format(d,t,s.c[d].shape))
k=s.state['c'][d][t]
vT[k,d] += s.state['sigPi'][d][t]
# print('vT={0}'.format(vT))
# print('vT_norm={0}'.format(np.sum(vT,0)))
# print('sigPi_d={0}'.format(s.sigPi[d]))
return vT
def plotTopics(s,minSupport=None):
D = s.state['D_tr']
ks=np.zeros(D)
for d in range(0,D):
# necessary since topics may be selected several times!
c_u=np.unique(s.state['c'][d,:])
sigPi_u = np.zeros(c_u.size)
for i in range(0,c_u.size):
#print('{}'.format(c_u[i] == s.c[d]))
#print('{}'.format(s.sigPi[d]))
sigPi_u[i] = np.sum(s.state['sigPi'][d,c_u[i] == s.state['c'][d,:]])
k_max = c_u[sigPi_u == np.max(sigPi_u)]
# print('c={};'.format(s.c[d]))
# print('sigPi={};'.format(s.sigPi[d]))
print('sigPi_u = {};\tc_u={};\tk_max={}'.format(sigPi_u,c_u,k_max))
# t_max=np.nonzero(s.sigPi[d]==np.max(s.sigPi[d]))[0][0]
# print('d={}; D={}'.format(d,D))
# print('t_max={};'.format(np.nonzero(s.sigPi[d]==np.max(s.sigPi[d]))))
# print('sigPi={}; sum={}'.format(s.sigPi[d],np.sum(s.sigPi[d])))
# print('c[{}]={};'.format(d,s.c[d]))
# if t_max < s.c[d].size:
# k_max = s.c[d][t_max]
# else:
# k_max = np.nan # this means that we are not selecting one of the estimated models (the last element in sigPi is 1-sum(sigPi(0:end-1)) and represents the "other" models)
ks[d]=k_max[0]
ks_unique=np.unique(ks)
ks_unique=ks_unique[~np.isnan(ks_unique)]
if minSupport is not None:
Np = ks_unique.size # number of subplots
#print('D{0} Np{1}'.format(D,Np))
sup = np.zeros(ks_unique.size)
for d in range(0,D):
sup[np.nonzero(ks_unique==ks[d])[0]] += 1
#print('sup={0} sum(sup)={1}'.format(sup,np.sum(sup)))
delete = np.zeros(ks_unique.size,dtype=np.bool)
for i in range(0,Np):
if sup[i] < minSupport:
delete[i]=True
ks_unique = ks_unique[~delete]
Np = ks_unique.size # number of subplots
print('D{0} Np{1}'.format(D,Np))
Nrow = np.ceil(np.sqrt(Np))
Ncol = np.ceil(np.sqrt(Np))
fig=plt.figure()
for i in range(0,Np):
plt.subplot(Ncol,Nrow,i+1)
x = np.linspace(0,s.state['beta'][int(ks_unique[i])].size-1,s.state['beta'][int(ks_unique[i])].size)
plt.stem(x,s.state['beta'][int(ks_unique[i])])
plt.ylim([0.0,1.0])
plt.xlabel('topic '+str(ks_unique[i]))
return fig
class HDP_sample(HDP_base):
def generateDirHDPSample(s,D,N):
# doc level
s.state['D_tr'] = D
s.state['x_tr']=[]
# draw K topics from Dirichlet
s.state['beta'] = np.random.dirichlet(s.state['base'],s.state['K'])
# draw breaking proportions using Beta
s.state['v'] = np.random.beta(1,s.state['omega'],s.state['K'])
s.state['sigV'] = stickBreaking(s.state['v'])
s.state['c']=[] # pointers to corp level topics
s.state['pi']=[] # breaking proportions for selected doc level topics
s.state['sigPi']=[]
s.state['z']=[]
s.state['c'] = np.zeros((D,s.state['T']))
s.state['pi'] = np.zeros((D,s.state['T']))
s.state['sigPi'] = np.zeros((D,s.state['T']+1))
s.state['logP_w']=np.ones((D,s.state['Nw']))*0.1
for d in range(0,D): # for each document
# draw T doc level pointers to topics (multinomial)
_, s.state['c'][d,:] = np.nonzero(np.random.multinomial(1,s.state['sigV'],s.state['T']))
# draw T doc level breaking proportions using Beta
s.state['pi'][d,:] = np.random.beta(1,s.state['alpha'],s.state['T'])
s.state['sigPi'][d,:] = stickBreaking(s.state['pi'][d,:])
s.state['x_tr'].append(np.zeros(N))
# draw topic assignment of word (multinomial)
s.state['z'].append(np.zeros(N))
_, s.state['z'][d] = np.nonzero(np.random.multinomial(1,s.state['sigPi'][d,:],N))
for i in range(0,N): # for each word
# draw words
_, s.state['x_tr'][d][i] = np.nonzero(np.random.multinomial(1,s.state['beta'][ s.state['c'][d, s.state['z'][d][i]], :],1))
s.state['logP_w'][d, s.state['x_tr'][d][i]] +=1
s.state['x_tr'][d] = s.state['x_tr'][d].astype(np.uint32)
s.state['logP_w'][d,:] /= np.sum(s.state['logP_w'][d,:])
s.state['logP_w'] = np.log(s.state['logP_w'])
# for d in range(0,D):
# print('d={0}: {1}'.format(d,s.state['x_tr'][d]))
s.state['D_te'] = 0
s.state['perp'] = []
return s.state['x_tr'], s.state['sigV'], s.state['beta'], s.state['pi'], s.state['c']
| mit |
openclimatedata/pyhector | scripts/write_defaults.py | 1 | 8913 | #!/usr/bin/env python3
# Save Hector default config and emissions units as importable Python
# modules.
# Usage (requires Pandas):
# Run this script as
# ./write_defaults.py
import ast
import configparser
import os
import pandas as pd
from pprint import pformat
# Default config from `ini`-file
default_config = os.path.join(os.path.dirname(__file__),
'../pyhector/rcp_default.ini')
config = configparser.ConfigParser(inline_comment_prefixes=(';'))
config.optionxform = str
config.read(default_config)
output = '"""Dictionary with default config."""\n\n'
parameters = {}
for section in config.sections():
if len(config.options(section)) > 0:
parameters[section] = {}
for option in config.options(section):
value = config.get(section, option)
# Hector-specific ini property-value assignments with a time index,
# like 'Ftalbedo[1750]', are turned into a list of (year, value) tuples.
if option.endswith("]"):
split = option.split("[")
name = split[0]
year = int(split[1][:-1]) # leave out closing "]"
if name not in parameters[section]:
parameters[section][name] = []
parameters[section][name].append(
(year, ast.literal_eval(value),)
)
else:
if option == 'run_name':
value = "pyhector-run"
elif option in ["enabled", "do_spinup"]:
value = True if int(value) == 1 else False
# Values containing a unit like "H0=35.0,pptv" are split and
# turned into a tuple.
elif "," in value:
number, unit = tuple(value.split(","))
value = (ast.literal_eval(number), unit,)
# Convert floats and ints to Python numbers
else:
value = ast.literal_eval(value)
parameters[section][option] = value
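# Illustrative example (editor's note; the section and value are hypothetical):
# an ini line "Ftalbedo[1750]=0.0" inside section [temperature] would end up
# as parameters["temperature"]["Ftalbedo"] == [(1750, 0.0)].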
# Volcanic RF (from Volcanic_RF.csv)
parameters["so2"]["SV"] = [
(1765, 0),
(1766, 0.11622211),
(1767, 0.23244422),
(1768, 0.19744422),
(1769, 0.13181922),
(1770, 0.12744422),
(1771, 0.17994422),
(1772, 0.21494422),
(1773, 0.22806922),
(1774, 0.23244422),
(1775, 0.23244422),
(1776, 0.23244422),
(1777, 0.23244422),
(1778, 0.23244422),
(1779, 0.23244422),
(1780, 0.23244422),
(1781, 0.23244422),
(1782, 0.21056922),
(1783, 0.11869422),
(1784, 0.044319219),
(1785, 0.10119422),
(1786, 0.18869422),
(1787, 0.22369422),
(1788, 0.22806922),
(1789, 0.12306922),
(1790, -0.025680781),
(1791, 0.009319219),
(1792, 0.14056922),
(1793, 0.20619422),
(1794, 0.21494422),
(1795, 0.19306922),
(1796, 0.18869422),
(1797, 0.21056922),
(1798, 0.22806922),
(1799, 0.23244422),
(1800, 0.23244422),
(1801, 0.23244422),
(1802, 0.23244422),
(1803, 0.23244422),
(1804, 0.23244422),
(1805, 0.23244422),
(1806, 0.23244422),
(1807, 0.23244422),
(1808, -0.25755578),
(1809, -1.3994308),
(1810, -1.8019308),
(1811, -1.0100558),
(1812, -0.34068078),
(1813, -0.14818078),
(1814, -0.67755578),
(1815, -2.1519308),
(1816, -2.6638058),
(1817, -1.4956808),
(1818, -0.36255578),
(1819, 0.092444219),
(1820, 0.22369422),
(1821, 0.23244422),
(1822, 0.23244422),
(1823, 0.23244422),
(1824, 0.23244422),
(1825, 0.23244422),
(1826, 0.23244422),
(1827, 0.23244422),
(1828, 0.22806922),
(1829, 0.19306922),
(1830, -0.021305781),
(1831, -0.39755578),
(1832, -0.48505578),
(1833, -0.18318078),
(1834, -0.34505578),
(1835, -0.98818078),
(1836, -0.94880578),
(1837, -0.34943078),
(1838, 0.044319219),
(1839, 0.17994422),
(1840, 0.17994422),
(1841, 0.17556922),
(1842, 0.18869422),
(1843, 0.16681922),
(1844, 0.15369422),
(1845, 0.18431922),
(1846, 0.21494422),
(1847, 0.22806922),
(1848, 0.23244422),
(1849, 0.21599422),
(1850, 0.19200464),
(1851, 0.19303276),
(1852, 0.20879734),
(1853, 0.2235338),
(1854, 0.2317588),
(1855, 0.10118693),
(1856, -0.35667141),
(1857, -0.61027557),
(1858, -0.29772557),
(1859, 0.028190052),
(1860, 0.15053693),
(1861, 0.13957026),
(1862, 0.05526401),
(1863, 0.06760151),
(1864, 0.15087964),
(1865, 0.19851609),
(1866, 0.21907859),
(1867, 0.22901714),
(1868, 0.23107339),
(1869, 0.22284839),
(1870, 0.21873589),
(1871, 0.2194213),
(1872, 0.2070838),
(1873, 0.19337547),
(1874, 0.20125776),
(1875, 0.19406089),
(1876, 0.15293589),
(1877, 0.14848068),
(1878, 0.18035255),
(1879, 0.20194318),
(1880, 0.21565151),
(1881, 0.22284839),
(1882, 0.11112547),
(1883, -0.80253495),
(1884, -1.5373016),
(1885, -0.94407349),
(1886, -0.44851724),
(1887, -0.32137245),
(1888, -0.21102036),
(1889, -0.27750578),
(1890, -0.3316537),
(1891, -0.2617412),
(1892, -0.087645365),
(1893, 0.060061927),
(1894, 0.16938589),
(1895, 0.1618463),
(1896, 0.016537969),
(1897, -0.064683906),
(1898, 0.037785885),
(1899, 0.15053693),
(1900, 0.20194318),
(1901, 0.19166193),
(1902, -0.25694328),
(1903, -0.65105786),
(1904, -0.3563287),
(1905, 0.002144219),
(1906, 0.1001588),
(1907, 0.076169219),
(1908, 0.087821302),
(1909, 0.15396401),
(1910, 0.18892026),
(1911, 0.19508901),
(1912, -0.048576615),
(1913, -0.15652974),
(1914, 0.055606719),
(1915, 0.15944734),
(1916, 0.18754943),
(1917, 0.19611714),
(1918, 0.20194318),
(1919, 0.1947463),
(1920, 0.11078276),
(1921, 0.095703594),
(1922, 0.18103797),
(1923, 0.20228589),
(1924, 0.18926297),
(1925, 0.18926297),
(1926, 0.19714526),
(1927, 0.19508901),
(1928, 0.14573901),
(1929, 0.096046302),
(1930, 0.12174943),
(1931, 0.14642443),
(1932, 0.11489526),
(1933, 0.12380568),
(1934, 0.16218901),
(1935, 0.16938589),
(1936, 0.18069526),
(1937, 0.17932443),
(1938, 0.16321714),
(1939, 0.16390255),
(1940, 0.18377964),
(1941, 0.19303276),
(1942, 0.17144214),
(1943, 0.16630151),
(1944, 0.18926297),
(1945, 0.19988693),
(1946, 0.2070838),
(1947, 0.1988588),
(1948, 0.19680255),
(1949, 0.18926297),
(1950, 0.18686401),
(1951, 0.1906338),
(1952, 0.18446505),
(1953, 0.17555464),
(1954, 0.18069526),
(1955, 0.2029713),
(1956, 0.2153088),
(1957, 0.22559005),
(1958, 0.23073068),
(1959, 0.22696089),
(1960, 0.15190776),
(1961, 0.060747344),
(1962, -0.013963073),
(1963, -0.49546828),
(1964, -0.85325578),
(1965, -0.53248078),
(1966, -0.15001828),
(1967, -0.021502656),
(1968, -0.22027349),
(1969, -0.29635474),
(1970, -0.055430781),
(1971, 0.11420984),
(1972, 0.15327859),
(1973, 0.10632755),
(1974, -0.040694323),
(1975, -0.14110786),
(1976, -0.017390156),
(1977, 0.13408693),
(1978, 0.10906922),
(1979, 0.093990052),
(1980, 0.14813797),
(1981, 0.079596302),
(1982, -0.59142661),
(1983, -0.87621724),
(1984, -0.32000161),
(1985, -0.023216198),
(1986, 0.035044219),
(1987, 0.065887969),
(1988, 0.10598484),
(1989, 0.12928901),
(1990, 0.12654734),
(1991, -0.81487245),
(1992, -1.4197527),
(1993, -0.68190161),
(1994, -0.13151203),
(1995, 0.058691094),
(1996, 0.12380568),
(1997, 0.15327859),
(1998, 0.18754943),
(1999, 0.21633693),
(2000, 0.22867443),
(2001, 0.23244422),
(2002, 0.23244422),
(2003, 0.23244422),
(2004, 0.23244422),
(2005, 0.18401834),
(2006, 0.06779623),
(2007, 0),
(2008, 0),
(2009, 0),
(2010, 0)
]
output += "_default_config = {\n " + \
pformat(parameters, indent=1, width=75)[1:-1].replace(
"\n '", "\n '").replace(
" '", " '") + \
"\n}\n"
with open(os.path.join(os.path.dirname(__file__),
'../pyhector/default_config.py'), 'w') as f:
f.write(output)
# Default units from input CSV
units = pd.read_csv(
os.path.join(os.path.dirname(__file__),
'../pyhector/emissions/RCP26_emissions.csv'),
skiprows=2,
header=None)
units = units.loc[:1, 1:].T.set_index(1).to_dict()[0]
with open(os.path.join(os.path.dirname(__file__),
"../pyhector/units.py"), "w") as f:
f.write('"""')
f.write('Dictionary of emissions and their expected units in Hector.')
f.write('"""\n\n')
f.write("units = {\n ")
f.write(pformat(units, indent=4)[1:-1])
f.write("\n}\n")
| agpl-3.0 |
kubeflow/kfp-tekton | sdk/python/tests/compiler/testdata/old_kfp_volume.py | 1 | 38533 | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import json
import kfp.components as comp
from collections import OrderedDict
from kubernetes import client as k8s_client
def loaddata():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
path = "data/"
PREDICTION_LABEL = 'Survived'
test_df = pd.read_csv(path + "test.csv")
train_df = pd.read_csv(path + "train.csv")
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(PREDICTION_LABEL, "PREDICTION_LABEL")
_kale_marshal_utils.save(test_df, "test_df")
_kale_marshal_utils.save(train_df, "train_df")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (
block1,
block2,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/loaddata.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('loaddata')
_kale_mlmd_utils.call("mark_execution_complete")
def datapreprocessing():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
test_df = _kale_marshal_utils.load("test_df")
train_df = _kale_marshal_utils.load("train_df")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
data = [train_df, test_df]
for dataset in data:
dataset['relatives'] = dataset['SibSp'] + dataset['Parch']
dataset.loc[dataset['relatives'] > 0, 'not_alone'] = 0
dataset.loc[dataset['relatives'] == 0, 'not_alone'] = 1
dataset['not_alone'] = dataset['not_alone'].astype(int)
train_df['not_alone'].value_counts()
'''
block3 = '''
# This does not contribute to a person's survival probability
train_df = train_df.drop(['PassengerId'], axis=1)
'''
block4 = '''
import re
deck = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "U": 8}
data = [train_df, test_df]
for dataset in data:
dataset['Cabin'] = dataset['Cabin'].fillna("U0")
dataset['Deck'] = dataset['Cabin'].map(lambda x: re.compile("([a-zA-Z]+)").search(x).group())
dataset['Deck'] = dataset['Deck'].map(deck)
dataset['Deck'] = dataset['Deck'].fillna(0)
dataset['Deck'] = dataset['Deck'].astype(int)
# we can now drop the cabin feature
train_df = train_df.drop(['Cabin'], axis=1)
test_df = test_df.drop(['Cabin'], axis=1)
'''
block5 = '''
data = [train_df, test_df]
for dataset in data:
mean = train_df["Age"].mean()
std = test_df["Age"].std()
is_null = dataset["Age"].isnull().sum()
# compute random numbers between the mean, std and is_null
rand_age = np.random.randint(mean - std, mean + std, size = is_null)
# fill NaN values in Age column with random values generated
age_slice = dataset["Age"].copy()
age_slice[np.isnan(age_slice)] = rand_age
dataset["Age"] = age_slice
dataset["Age"] = train_df["Age"].astype(int)
train_df["Age"].isnull().sum()
'''
block6 = '''
train_df['Embarked'].describe()
'''
block7 = '''
# fill with most common value
common_value = 'S'
data = [train_df, test_df]
for dataset in data:
dataset['Embarked'] = dataset['Embarked'].fillna(common_value)
'''
block8 = '''
train_df.info()
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(test_df, "test_df")
_kale_marshal_utils.save(train_df, "train_df")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
block3,
block4,
block5,
block6,
block7,
block8,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/datapreprocessing.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('datapreprocessing')
_kale_mlmd_utils.call("mark_execution_complete")
def featureengineering():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
PREDICTION_LABEL = _kale_marshal_utils.load("PREDICTION_LABEL")
test_df = _kale_marshal_utils.load("test_df")
train_df = _kale_marshal_utils.load("train_df")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
data = [train_df, test_df]
for dataset in data:
dataset['Fare'] = dataset['Fare'].fillna(0)
dataset['Fare'] = dataset['Fare'].astype(int)
'''
block3 = '''
data = [train_df, test_df]
titles = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in data:
# extract titles
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
# replace titles with a more common title or as Rare
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr',\\
'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
# convert titles into numbers
dataset['Title'] = dataset['Title'].map(titles)
# filling NaN with 0, to get safe
dataset['Title'] = dataset['Title'].fillna(0)
train_df = train_df.drop(['Name'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
'''
block4 = '''
genders = {"male": 0, "female": 1}
data = [train_df, test_df]
for dataset in data:
dataset['Sex'] = dataset['Sex'].map(genders)
'''
block5 = '''
train_df = train_df.drop(['Ticket'], axis=1)
test_df = test_df.drop(['Ticket'], axis=1)
'''
block6 = '''
ports = {"S": 0, "C": 1, "Q": 2}
data = [train_df, test_df]
for dataset in data:
dataset['Embarked'] = dataset['Embarked'].map(ports)
'''
block7 = '''
data = [train_df, test_df]
for dataset in data:
dataset['Age'] = dataset['Age'].astype(int)
dataset.loc[ dataset['Age'] <= 11, 'Age'] = 0
dataset.loc[(dataset['Age'] > 11) & (dataset['Age'] <= 18), 'Age'] = 1
dataset.loc[(dataset['Age'] > 18) & (dataset['Age'] <= 22), 'Age'] = 2
dataset.loc[(dataset['Age'] > 22) & (dataset['Age'] <= 27), 'Age'] = 3
dataset.loc[(dataset['Age'] > 27) & (dataset['Age'] <= 33), 'Age'] = 4
dataset.loc[(dataset['Age'] > 33) & (dataset['Age'] <= 40), 'Age'] = 5
dataset.loc[(dataset['Age'] > 40) & (dataset['Age'] <= 66), 'Age'] = 6
dataset.loc[ dataset['Age'] > 66, 'Age'] = 6
# let's see how it's distributed train_df['Age'].value_counts()
'''
block8 = '''
data = [train_df, test_df]
for dataset in data:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[(dataset['Fare'] > 31) & (dataset['Fare'] <= 99), 'Fare'] = 3
dataset.loc[(dataset['Fare'] > 99) & (dataset['Fare'] <= 250), 'Fare'] = 4
dataset.loc[ dataset['Fare'] > 250, 'Fare'] = 5
dataset['Fare'] = dataset['Fare'].astype(int)
'''
block9 = '''
data = [train_df, test_df]
for dataset in data:
dataset['Age_Class']= dataset['Age']* dataset['Pclass']
'''
block10 = '''
for dataset in data:
dataset['Fare_Per_Person'] = dataset['Fare']/(dataset['relatives']+1)
dataset['Fare_Per_Person'] = dataset['Fare_Per_Person'].astype(int)
# Let's take a last look at the training set, before we start training the models.
train_df.head(10)
'''
block11 = '''
train_labels = train_df[PREDICTION_LABEL]
train_df = train_df.drop(PREDICTION_LABEL, axis=1)
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(train_df, "train_df")
_kale_marshal_utils.save(train_labels, "train_labels")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
block3,
block4,
block5,
block6,
block7,
block8,
block9,
block10,
block11,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/featureengineering.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('featureengineering')
_kale_mlmd_utils.call("mark_execution_complete")
def decisiontree():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
train_df = _kale_marshal_utils.load("train_df")
train_labels = _kale_marshal_utils.load("train_labels")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
decision_tree = DecisionTreeClassifier()
decision_tree.fit(train_df, train_labels)
acc_decision_tree = round(decision_tree.score(train_df, train_labels) * 100, 2)
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(acc_decision_tree, "acc_decision_tree")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/decisiontree.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('decisiontree')
_kale_mlmd_utils.call("mark_execution_complete")
def svm():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
train_df = _kale_marshal_utils.load("train_df")
train_labels = _kale_marshal_utils.load("train_labels")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
linear_svc = SVC(gamma='auto')
linear_svc.fit(train_df, train_labels)
acc_linear_svc = round(linear_svc.score(train_df, train_labels) * 100, 2)
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(acc_linear_svc, "acc_linear_svc")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/svm.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('svm')
_kale_mlmd_utils.call("mark_execution_complete")
def naivebayes():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
train_df = _kale_marshal_utils.load("train_df")
train_labels = _kale_marshal_utils.load("train_labels")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
gaussian = GaussianNB()
gaussian.fit(train_df, train_labels)
acc_gaussian = round(gaussian.score(train_df, train_labels) * 100, 2)
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(acc_gaussian, "acc_gaussian")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/naivebayes.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('naivebayes')
_kale_mlmd_utils.call("mark_execution_complete")
def logisticregression():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
train_df = _kale_marshal_utils.load("train_df")
train_labels = _kale_marshal_utils.load("train_labels")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
logreg = LogisticRegression(solver='lbfgs', max_iter=110)
logreg.fit(train_df, train_labels)
acc_log = round(logreg.score(train_df, train_labels) * 100, 2)
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(acc_log, "acc_log")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/logisticregression.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('logisticregression')
_kale_mlmd_utils.call("mark_execution_complete")
def randomforest():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
train_df = _kale_marshal_utils.load("train_df")
train_labels = _kale_marshal_utils.load("train_labels")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(train_df, train_labels)
acc_random_forest = round(random_forest.score(train_df, train_labels) * 100, 2)
'''
data_saving_block = '''
# -----------------------DATA SAVING START---------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.save(acc_random_forest, "acc_random_forest")
# -----------------------DATA SAVING END-----------------------------------
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
data_saving_block)
html_artifact = _kale_run_code(blocks)
with open("/randomforest.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('randomforest')
_kale_mlmd_utils.call("mark_execution_complete")
def results():
from kale.utils import mlmd_utils as _kale_mlmd_utils
_kale_mlmd_utils.init_metadata()
data_loading_block = '''
# -----------------------DATA LOADING START--------------------------------
from kale.marshal import utils as _kale_marshal_utils
_kale_marshal_utils.set_kale_data_directory("/marshal")
_kale_marshal_utils.set_kale_directory_file_names()
acc_decision_tree = _kale_marshal_utils.load("acc_decision_tree")
acc_gaussian = _kale_marshal_utils.load("acc_gaussian")
acc_linear_svc = _kale_marshal_utils.load("acc_linear_svc")
acc_log = _kale_marshal_utils.load("acc_log")
acc_random_forest = _kale_marshal_utils.load("acc_random_forest")
# -----------------------DATA LOADING END----------------------------------
'''
block1 = '''
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
'''
block2 = '''
results = pd.DataFrame({
'Model': ['Support Vector Machines', 'logistic Regression',
'Random Forest', 'Naive Bayes', 'Decision Tree'],
'Score': [acc_linear_svc, acc_log,
acc_random_forest, acc_gaussian, acc_decision_tree]})
result_df = results.sort_values(by='Score', ascending=False)
result_df = result_df.set_index('Score')
print(result_df)
'''
# run the code blocks inside a jupyter kernel
from kale.utils.jupyter_utils import run_code as _kale_run_code
from kale.utils.kfp_utils import \
update_uimetadata as _kale_update_uimetadata
blocks = (data_loading_block,
block1,
block2,
)
html_artifact = _kale_run_code(blocks)
with open("/results.html", "w") as f:
f.write(html_artifact)
_kale_update_uimetadata('results')
_kale_mlmd_utils.call("mark_execution_complete")
loaddata_op = comp.func_to_container_op(loaddata)
datapreprocessing_op = comp.func_to_container_op(datapreprocessing)
featureengineering_op = comp.func_to_container_op(featureengineering)
decisiontree_op = comp.func_to_container_op(decisiontree)
svm_op = comp.func_to_container_op(svm)
naivebayes_op = comp.func_to_container_op(naivebayes)
logisticregression_op = comp.func_to_container_op(logisticregression)
randomforest_op = comp.func_to_container_op(randomforest)
results_op = comp.func_to_container_op(results)
@dsl.pipeline(
name='titanic-ml-gxj28',
description='Predict which passengers survived the Titanic shipwreck'
)
def auto_generated_pipeline():
pvolumes_dict = OrderedDict()
volume_step_names = []
volume_name_parameters = []
marshal_vop = dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=dsl.VOLUME_MODE_RWM,
size="1Gi"
)
volume_step_names.append(marshal_vop.name)
volume_name_parameters.append(marshal_vop.outputs["name"].full_name)
pvolumes_dict['/marshal'] = marshal_vop.volume
volume_step_names.sort()
volume_name_parameters.sort()
loaddata_task = loaddata_op()\
.add_pvolumes(pvolumes_dict)\
.after()
loaddata_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
loaddata_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'loaddata': '/loaddata.html'})
loaddata_task.output_artifact_paths.update(output_artifacts)
loaddata_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = loaddata_task.dependent_names + volume_step_names
loaddata_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
loaddata_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
datapreprocessing_task = datapreprocessing_op()\
.add_pvolumes(pvolumes_dict)\
.after(loaddata_task)
datapreprocessing_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
datapreprocessing_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'datapreprocessing': '/datapreprocessing.html'})
datapreprocessing_task.output_artifact_paths.update(output_artifacts)
datapreprocessing_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = datapreprocessing_task.dependent_names + volume_step_names
datapreprocessing_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
datapreprocessing_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
featureengineering_task = featureengineering_op()\
.add_pvolumes(pvolumes_dict)\
.after(datapreprocessing_task)
featureengineering_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
featureengineering_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'featureengineering': '/featureengineering.html'})
featureengineering_task.output_artifact_paths.update(output_artifacts)
featureengineering_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = featureengineering_task.dependent_names + volume_step_names
featureengineering_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
featureengineering_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
decisiontree_task = decisiontree_op()\
.add_pvolumes(pvolumes_dict)\
.after(featureengineering_task)
decisiontree_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
decisiontree_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'decisiontree': '/decisiontree.html'})
decisiontree_task.output_artifact_paths.update(output_artifacts)
decisiontree_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = decisiontree_task.dependent_names + volume_step_names
decisiontree_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
decisiontree_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
svm_task = svm_op()\
.add_pvolumes(pvolumes_dict)\
.after(featureengineering_task)
svm_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
svm_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'svm': '/svm.html'})
svm_task.output_artifact_paths.update(output_artifacts)
svm_task.add_pod_label("pipelines.kubeflow.org/metadata_written", "true")
dep_names = svm_task.dependent_names + volume_step_names
svm_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
svm_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
naivebayes_task = naivebayes_op()\
.add_pvolumes(pvolumes_dict)\
.after(featureengineering_task)
naivebayes_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
naivebayes_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'naivebayes': '/naivebayes.html'})
naivebayes_task.output_artifact_paths.update(output_artifacts)
naivebayes_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = naivebayes_task.dependent_names + volume_step_names
naivebayes_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
naivebayes_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
logisticregression_task = logisticregression_op()\
.add_pvolumes(pvolumes_dict)\
.after(featureengineering_task)
logisticregression_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
logisticregression_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'logisticregression': '/logisticregression.html'})
logisticregression_task.output_artifact_paths.update(output_artifacts)
logisticregression_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = logisticregression_task.dependent_names + volume_step_names
logisticregression_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
logisticregression_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
randomforest_task = randomforest_op()\
.add_pvolumes(pvolumes_dict)\
.after(featureengineering_task)
randomforest_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
randomforest_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'randomforest': '/randomforest.html'})
randomforest_task.output_artifact_paths.update(output_artifacts)
randomforest_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = randomforest_task.dependent_names + volume_step_names
randomforest_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
randomforest_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
results_task = results_op()\
.add_pvolumes(pvolumes_dict)\
.after(randomforest_task, logisticregression_task, naivebayes_task, svm_task, decisiontree_task)
results_task.container.working_dir = "/Users/animeshsingh/go/src/github.com/kubeflow/kale/examples/titanic-ml-dataset"
results_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
output_artifacts = {}
output_artifacts.update(
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
output_artifacts.update({'results': '/results.html'})
results_task.output_artifact_paths.update(output_artifacts)
results_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
dep_names = results_task.dependent_names + volume_step_names
results_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
if volume_name_parameters:
results_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('titanic')
# Submit a pipeline run
from kale.utils.kfp_utils import generate_run_name
run_name = generate_run_name('titanic-ml-gxj28')
run_result = client.run_pipeline(
experiment.id, run_name, pipeline_filename, {})
| apache-2.0 |
jasontlam/snorkel | snorkel/annotations.py | 1 | 24365 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
import numpy as np
from pandas import DataFrame, Series
import scipy.sparse as sparse
from sqlalchemy.sql import bindparam, select
from .features import get_span_feats
from .models import (
GoldLabel, GoldLabelKey, Label, LabelKey, Feature, FeatureKey, Candidate,
Marginal
)
from .models.meta import new_sessionmaker
from .udf import UDF, UDFRunner
from .utils import (
matrix_conflicts,
matrix_coverage,
matrix_overlaps,
matrix_tp,
matrix_fp,
matrix_fn,
matrix_tn
)
class csr_AnnotationMatrix(sparse.csr_matrix):
"""
An extension of the scipy.sparse.csr_matrix class for holding sparse annotation matrices
and related helper methods.
"""
def __init__(self, arg1, **kwargs):
# Note: Currently these need to return None if unset, otherwise matrix copy operations break...
self.candidate_index = kwargs.pop('candidate_index', None)
self.row_index = kwargs.pop('row_index', None)
self.annotation_key_cls = kwargs.pop('annotation_key_cls', None)
self.key_index = kwargs.pop('key_index', None)
self.col_index = kwargs.pop('col_index', None)
# Note that scipy relies on the first three letters of the class to define matrix type...
super(csr_AnnotationMatrix, self).__init__(arg1, **kwargs)
def get_candidate(self, session, i):
"""Return the Candidate object corresponding to row i"""
return session.query(Candidate).filter(Candidate.id == self.row_index[i]).one()
def get_row_index(self, candidate):
"""Return the row index of the Candidate"""
return self.candidate_index[candidate.id]
def get_key(self, session, j):
"""Return the AnnotationKey object corresponding to column j"""
return session.query(self.annotation_key_cls)\
.filter(self.annotation_key_cls.id == self.col_index[j]).one()
def get_col_index(self, key):
"""Return the cow index of the AnnotationKey"""
return self.key_index[key.id]
def _get_sliced_indexes(self, s, axis, index, inv_index):
"""
Remaps the indexes between matrix rows/cols and candidates/keys.
Note: This becomes a massive performance bottleneck if not implemented
properly, so be careful when changing it!
"""
if isinstance(s, slice):
# Check for empty slice
if s.start is None and s.stop is None:
return index, inv_index
else:
idxs = np.arange(self.shape[axis])[s]
elif isinstance(s, int):
idxs = np.array([s])
else: # s is an array of ints
idxs = s
# If s is the entire slice, skip the remapping step
if np.array_equal(idxs, list(range(len(idxs)))):
return index, inv_index
index_new, inv_index_new = {}, {}
for i_new, i in enumerate(idxs):
k = index[i]
index_new[i_new] = k
inv_index_new[k] = i_new
return index_new, inv_index_new
def __getitem__(self, key):
X = super(csr_AnnotationMatrix, self).__getitem__(key)
# If X is an integer or float value, just return it
if type(X) in [int, float] or issubclass(type(X), np.integer)\
or issubclass(type(X), np.float):
return X
# If X is a matrix, make sure it stays a csr_AnnotationMatrix
elif not isinstance(X, csr_AnnotationMatrix):
X = csr_AnnotationMatrix(X)
# X must be a matrix, so update appropriate csr_AnnotationMatrix fields
X.annotation_key_cls = self.annotation_key_cls
row_slice, col_slice = self._unpack_index(key)
X.row_index, X.candidate_index = self._get_sliced_indexes(
row_slice, 0, self.row_index, self.candidate_index)
X.col_index, X.key_index = self._get_sliced_indexes(
col_slice, 1, self.col_index, self.key_index)
return X
def stats(self):
"""Return summary stats about the annotations"""
raise NotImplementedError()
try:
class csr_LabelMatrix(csr_AnnotationMatrix):
def lf_stats(self, session, labels=None, est_accs=None):
"""Returns a pandas DataFrame with the LFs and various per-LF statistics"""
lf_names = [self.get_key(session, j).name for j in range(self.shape[1])]
# Default LF stats
col_names = ['j', 'Coverage', 'Overlaps', 'Conflicts']
d = {
'j' : list(range(self.shape[1])),
'Coverage' : Series(data=matrix_coverage(self), index=lf_names),
'Overlaps' : Series(data=matrix_overlaps(self), index=lf_names),
'Conflicts' : Series(data=matrix_conflicts(self), index=lf_names)
}
if labels is not None:
col_names.extend(['TP', 'FP', 'FN', 'TN', 'Empirical Acc.'])
ls = np.ravel(labels.todense() if sparse.issparse(labels) else labels)
tp = matrix_tp(self, ls)
fp = matrix_fp(self, ls)
fn = matrix_fn(self, ls)
tn = matrix_tn(self, ls)
ac = (tp+tn) / (tp+tn+fp+fn)
d['Empirical Acc.'] = Series(data=ac, index=lf_names)
d['TP'] = Series(data=tp, index=lf_names)
d['FP'] = Series(data=fp, index=lf_names)
d['FN'] = Series(data=fn, index=lf_names)
d['TN'] = Series(data=tn, index=lf_names)
if est_accs is not None:
col_names.append('Learned Acc.')
d['Learned Acc.'] = est_accs
d['Learned Acc.'].index = lf_names
return DataFrame(data=d, index=lf_names)[col_names]
# This is a hack for getting the documentation to build...
except:
class csr_LabelMatrix(object):
def lf_stats(self, session, labels=None, est_accs=None):
return None
class Annotator(UDFRunner):
"""Abstract class for annotating candidates and persisting these annotations to DB"""
def __init__(self, annotation_class, annotation_key_class, f_gen):
self.annotation_class = annotation_class
self.annotation_key_class = annotation_key_class
super(Annotator, self).__init__(AnnotatorUDF,
annotation_class=annotation_class,
annotation_key_class=annotation_key_class,
f_gen=f_gen)
def apply(self, split=0, key_group=0, replace_key_set=True, cids_query=None,
**kwargs):
# If we are replacing the key set, make sure the reducer key id cache is cleared!
if replace_key_set:
self.reducer.key_cache = {}
# Get the cids based on the split, and also the count
SnorkelSession = new_sessionmaker()
session = SnorkelSession()
cids_query = cids_query or session.query(Candidate.id)\
.filter(Candidate.split == split)
# Note: In the current UDFRunner implementation, we load all these into memory and fill a
# multiprocessing JoinableQueue with them before starting... so might as well load them here and pass in.
# Also, if we try to pass in a query iterator instead, with AUTOCOMMIT on, we get a TXN error...
cids = cids_query.all()
cids_count = len(cids)
# Run the Annotator
super(Annotator, self).apply(cids, split=split, key_group=key_group,
replace_key_set=replace_key_set, cids_query=cids_query,
count=cids_count, **kwargs)
# Load the matrix
return self.load_matrix(session, split=split, cids_query=cids_query,
key_group=key_group)
def clear(self, session, split=0, key_group=0, replace_key_set=True,
cids_query=None, **kwargs):
"""
Deletes the Annotations for the Candidates in the given split.
If replace_key_set=True, deletes *all* Annotations (of this Annotation sub-class)
and also deletes all AnnotationKeys (of this sub-class)
"""
query = session.query(self.annotation_class)
# If replace_key_set=False, then we just delete the annotations for candidates in our split
if not replace_key_set:
sub_query = cids_query or session.query(Candidate.id)\
.filter(Candidate.split == split)
sub_query = sub_query.subquery()
query = query.filter(self.annotation_class.candidate_id.in_(sub_query))
query.delete(synchronize_session='fetch')
# If we are creating a new key set, delete all old annotation keys
if replace_key_set:
query = session.query(self.annotation_key_class)
query = query.filter(self.annotation_key_class.group == key_group)
query.delete(synchronize_session='fetch')
def apply_existing(self, split=0, key_group=0, cids_query=None, **kwargs):
"""Alias for apply that emphasizes we are using an existing AnnotatorKey set."""
return self.apply(split=split, key_group=key_group,
replace_key_set=False, cids_query=cids_query, **kwargs)
def load_matrix(self, session, split=0, key_group=0, cids_query=None,
**kwargs):
raise NotImplementedError()
class AnnotatorUDF(UDF):
def __init__(self, annotation_class, annotation_key_class, f_gen, **kwargs):
self.annotation_class = annotation_class
self.annotation_key_class = annotation_key_class
# AnnotatorUDF relies on a *generator function* which yields annotations
# given a candidate input
# NB: inspect.isgeneratorfunction is not sufficient to check if f_gen
# is a generator (does not work with fns that wrap gen, e.g. partial)
# So no check here at the moment...
self.anno_generator = f_gen
# For caching key ids during the reduce step
self.key_cache = {}
super(AnnotatorUDF, self).__init__(**kwargs)
def apply(self, cid, **kwargs):
"""
Applies a given function to a Candidate, yielding a set of Annotations as key_name, value pairs
Note: Accepts a candidate _id_ as argument, because of issues with putting Candidate subclasses
into Queues (can't pickle...)
"""
seen = set()
cid = cid[0]
c = self.session.query(Candidate).filter(Candidate.id == cid).one()
for key_name, value in self.anno_generator(c):
# Note: Make sure no duplicates emitted here!
if (cid, key_name) not in seen:
seen.add((cid, key_name))
yield cid, key_name, value
def reduce(self, y, clear, key_group, replace_key_set, **kwargs):
"""
Inserts Annotations into the database.
For Annotations with unseen AnnotationKeys (in key_group, if not None), either adds these
AnnotationKeys if replace_key_set is True, else skips these Annotations.
"""
cid, key_name, value = y
# Prepares queries
# Annotation updating only needs to be done if clear=False
if not clear:
anno_update_query = self.annotation_class.__table__.update()
anno_update_query = anno_update_query.where(self.annotation_class.candidate_id == bindparam('cid'))
anno_update_query = anno_update_query.where(self.annotation_class.key_id == bindparam('kid'))
anno_update_query = anno_update_query.values(value=bindparam('value'))
# We only need to insert AnnotationKeys if replace_key_set=True
# Note that in current configuration, we never update AnnotationKeys!
if replace_key_set:
key_insert_query = self.annotation_key_class.__table__.insert()
# If we are replacing the AnnotationKeys (replace_key_set=True), then we assume they will
# all have been handled by *this* reduce thread, and hence be in the cache already
# So we only need key select queries if replace_key_set=False
else:
key_select_query = select([self.annotation_key_class.id])\
.where(self.annotation_key_class.name == bindparam('name'))
if key_group is not None:
key_select_query = key_select_query.where(self.annotation_key_class.group == key_group)
anno_insert_query = self.annotation_class.__table__.insert()
# Check if the AnnotationKey already exists, and gets its id
key_id = None
if key_name in self.key_cache:
key_id = self.key_cache[key_name]
else:
key_args = {'name': key_name, 'group': key_group} if key_group else {'name': key_name}
# If we are replacing the AnnotationKeys (replace_key_set=True), then we assume they will
# all have been handled by *this* reduce thread, and hence be in the cache already
if not replace_key_set:
key_id = self.session.execute(key_select_query, key_args).first()
# Key not in cache but exists in DB; add to cache
if key_id is not None:
key_id = key_id[0]
self.key_cache[key_name] = key_id
# Key not in cache or DB; add to both if replace_key_set = True
elif replace_key_set:
key_id = self.session.execute(key_insert_query, key_args).inserted_primary_key[0]
self.key_cache[key_name] = key_id
# If AnnotationKey does not exist and replace_key_set = False, skip
if key_id is not None:
# Updates the Annotation, assuming one might already exist, if clear = False
if not clear:
res = self.session.execute(anno_update_query, {'cid': cid, 'kid': key_id, 'value': value})
# If Annotation does not exist, insert
if (clear or res.rowcount == 0) and value != 0:
self.session.execute(anno_insert_query, {'candidate_id': cid, 'key_id': key_id, 'value': value})
def load_matrix(matrix_class, annotation_key_class, annotation_class, session,
split=0, cids_query=None, key_group=0, key_names=None, zero_one=False,
load_as_array=False):
"""
Returns the annotations corresponding to a split of candidates with N members
and an AnnotationKey group with M distinct keys as an N x M CSR sparse matrix.
"""
cid_query = cids_query or session.query(Candidate.id)\
.filter(Candidate.split == split)
cid_query = cid_query.order_by(Candidate.id)
keys_query = session.query(annotation_key_class.id)
keys_query = keys_query.filter(annotation_key_class.group == key_group)
if key_names is not None:
keys_query = keys_query.filter(annotation_key_class.name.in_(frozenset(key_names)))
keys_query = keys_query.order_by(annotation_key_class.id)
# First, we query to construct the row index map
cid_to_row = {}
row_to_cid = {}
for cid, in cid_query.all():
if cid not in cid_to_row:
j = len(cid_to_row)
# Create both mappings
cid_to_row[cid] = j
row_to_cid[j] = cid
# Second, we query to construct the column index map
kid_to_col = {}
col_to_kid = {}
for kid, in keys_query.all():
if kid not in kid_to_col:
j = len(kid_to_col)
# Create both mappings
kid_to_col[kid] = j
col_to_kid[j] = kid
# Create sparse matrix in COO format for incremental construction
row = []
columns = []
data = []
# Rely on the core for fast iteration
annot_select_query = annotation_class.__table__.select()
# Iteratively construct row index and output sparse matrix
# Cycles through the entire table to load the data.
# Performance may slow down based on table size; however, this is negligible since
# it takes about 8 min to go through 245M rows (pretty fast).
for res in session.execute(annot_select_query):
# NOTE: The order of return seems to be switched in Python 3???
# Either way, make sure the order is set here explicitly!
cid, kid, val = res.candidate_id, res.key_id, res.value
if cid in cid_to_row and kid in kid_to_col:
# Optionally restricts val range to {0,1}, mapping -1 -> 0
if zero_one:
val = 1 if val == 1 else 0
row.append(cid_to_row[cid])
columns.append(kid_to_col[kid])
data.append(int(val))
X = sparse.coo_matrix((data, (row, columns)), shape=(len(cid_to_row), len(kid_to_col)))
# Return as an AnnotationMatrix
Xr = matrix_class(X, candidate_index=cid_to_row, row_index=row_to_cid,
annotation_key_cls=annotation_key_class, key_index=kid_to_col,
col_index=col_to_kid)
return np.squeeze(Xr.toarray()) if load_as_array else Xr
def load_label_matrix(session, **kwargs):
return load_matrix(csr_LabelMatrix, LabelKey, Label, session, **kwargs)
def load_feature_matrix(session, **kwargs):
return load_matrix(csr_AnnotationMatrix, FeatureKey, Feature, session, **kwargs)
def load_gold_labels(session, annotator_name, **kwargs):
return load_matrix(csr_LabelMatrix, GoldLabelKey, GoldLabel, session, key_names=[annotator_name], **kwargs)
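# Illustrative sketch (not part of the original module): how the loader
# helpers above might be called once a Snorkel session exists. The split
# number and the 'gold' annotator name are assumptions for the example only.
def _example_load_matrices(session):
    L_train = load_label_matrix(session, split=0)    # N x M sparse LF labels
    F_train = load_feature_matrix(session, split=0)  # N x K sparse features
    L_gold = load_gold_labels(session, annotator_name='gold', split=0)
    return L_train, F_train, L_gold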
class LabelAnnotator(Annotator):
"""Apply labeling functions to the candidates, generating Label annotations
:param lfs: A _list_ of labeling functions (LFs)
"""
def __init__(self, lfs=None, label_generator=None):
if lfs is not None:
labels = lambda c : [(lf.__name__, lf(c)) for lf in lfs]
elif label_generator is not None:
labels = lambda c : label_generator(c)
else:
raise ValueError("Must provide lfs or label_generator kwarg.")
# Convert lfs to a generator function
# In particular, catch verbose values and convert to integer ones
def f_gen(c):
for lf_key, label in labels(c):
# Note: We assume if the LF output is an int, it is already
# mapped correctly
if isinstance(label, int):
yield lf_key, label
# None is a protected LF output value corresponding to 0,
# representing LF abstaining
elif label is None:
yield lf_key, 0
elif label in c.values:
if c.cardinality > 2:
yield lf_key, c.values.index(label) + 1
# Note: Would be nice to not special-case here, but for
# consistency we leave binary LF range as {-1,0,1}
else:
val = 1 if c.values.index(label) == 0 else -1
yield lf_key, val
else:
raise ValueError("""
Unable to parse label with value %s
for candidate with values %s""" % (label, c.values))
super(LabelAnnotator, self).__init__(Label, LabelKey, f_gen)
def load_matrix(self, session, **kwargs):
return load_label_matrix(session, **kwargs)
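# Illustrative sketch (not part of the original module): the minimal shape of
# a labeling function accepted by LabelAnnotator. Returning None abstains
# (mapped to 0 by f_gen above); returning a value in c.values yields a label.
def _example_lf_abstain(c):
    # A trivial LF that always abstains; a real LF would inspect the candidate c.
    return None
# Example usage: labeler = LabelAnnotator(lfs=[_example_lf_abstain])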
class FeatureAnnotator(Annotator):
"""Apply feature generators to the candidates, generating Feature annotations"""
def __init__(self, f=get_span_feats):
super(FeatureAnnotator, self).__init__(Feature, FeatureKey, f)
def load_matrix(self, session, **kwargs):
return load_feature_matrix(session, **kwargs)
def save_marginals(session, X, marginals, training=True):
"""Save marginal probabilities for a set of Candidates to db.
:param X: Either an M x N csr_AnnotationMatrix-class matrix, where M
is number of candidates, N number of LFs/features; OR a list of
arbitrary objects with candidate ids accessible via a .id attrib
:param marginals: A dense M x K matrix of marginal probabilities, where
K is the cardinality of the candidates, OR a M-dim list/array if K=2.
:param training: If True, these are training marginals / labels; else they
are saved as end model predictions.
Note: The marginals for k=0 are not stored, only for k = 1,...,K
"""
# Make sure that we are working with a numpy array
try:
shape = marginals.shape
except:
marginals = np.array(marginals)
shape = marginals.shape
# Handle binary input as M x 1-dim array; assume elements represent
# positive (k=1) class values
if len(shape) == 1:
marginals = np.vstack([1-marginals, marginals]).T
# Only add values for classes k=1,...,K
marginal_tuples = []
for i in range(shape[0]):
for k in range(1, shape[1] if len(shape) > 1 else 2):
if marginals[i, k] > 0:
marginal_tuples.append((i, k, marginals[i, k]))
# NOTE: This will delete all existing marginals of type `training`
session.query(Marginal).filter(Marginal.training == training).\
delete(synchronize_session='fetch')
# Prepare bulk INSERT query
q = Marginal.__table__.insert()
# Check whether X is an AnnotationMatrix or not
anno_matrix = isinstance(X, csr_AnnotationMatrix)
if not anno_matrix:
X = list(X)
# Prepare values
insert_vals = []
for i, k, p in marginal_tuples:
cid = X.get_candidate(session, i).id if anno_matrix else X[i].id
insert_vals.append({
'candidate_id': cid,
'training': training,
'value': k,
# We cast p in case it's a numpy type, which psycopg2 does not handle
'probability': float(p)
})
# Execute update
session.execute(q, insert_vals)
session.commit()
print("Saved %s marginals" % len(marginals))
def load_marginals(session, X=None, split=0, cids_query=None, training=True):
"""Load the marginal probs. for a given split of Candidates"""
# For candidate ids subquery
cids_query = cids_query or session.query(Candidate.id) \
.filter(Candidate.split == split)
# Ensure ordering by CID
cids_query = cids_query.order_by(Candidate.id)
cids_sub_query = cids_query.subquery('cids')
# Load marginal tuples from db
marginal_tuples = session.query(Marginal.candidate_id, Marginal.value,
Marginal.probability) \
.filter(Marginal.candidate_id == cids_sub_query.c.id) \
.filter(Marginal.training == training) \
.all()
# If an AnnotationMatrix or list of candidates X is provided, we make sure
# that the returned marginals are collated with X.
if X is not None:
# For now, handle feature matrix vs. list of objects with try / except
# Handle AnnotationMatrix
try:
cardinality = X.get_candidate(session, 0).cardinality
marginals = np.zeros((X.shape[0], cardinality))
cid_map = X.candidate_index
# Handle list of Candidates
except:
cardinality = X[0].cardinality
marginals = np.zeros((len(X), cardinality))
cid_map = dict([(x.id, i) for i, x in enumerate(X)])
# Otherwise if X is not provided, we sort by candidate id, using the
# cids_query from above
else:
cardinality = session.query(Candidate) \
.get(marginal_tuples[0][0]).cardinality
marginals = np.zeros((cids_query.count(), cardinality))
cid_map = dict([(cid, i) for i, (cid,) in enumerate(cids_query.all())])
# Assemble the marginals matrix according to the candidate index of X
for cid, k, p in marginal_tuples:
marginals[cid_map[cid], k] = p
# Add first column if k > 2, else ravel
if cardinality > 2:
row_sums = marginals.sum(axis=1)
for i in range(marginals.shape[0]):
marginals[i, 0] = 1 - row_sums[i]
else:
marginals = np.ravel(marginals[:, 1])
return marginals
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/util/test_deprecate_kwarg.py | 2 | 2080 | # -*- coding: utf-8 -*-
import pytest
from pandas.util._decorators import deprecate_kwarg
import pandas.util.testing as tm
@deprecate_kwarg("old", "new")
def _f1(new=False):
return new
_f2_mappings = {"yes": True, "no": False}
@deprecate_kwarg("old", "new", _f2_mappings)
def _f2(new=False):
return new
def _f3_mapping(x):
return x + 1
@deprecate_kwarg("old", "new", _f3_mapping)
def _f3(new=0):
return new
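# Note: as the tests below exercise, passing the deprecated "old" keyword emits
# a FutureWarning and its value is forwarded to "new", optionally translated via
# a dict (_f2_mappings) or a callable (_f3_mapping).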
@pytest.mark.parametrize("key,klass", [
("old", FutureWarning),
("new", None)
])
def test_deprecate_kwarg(key, klass):
x = 78
with tm.assert_produces_warning(klass):
assert _f1(**{key: x}) == x
@pytest.mark.parametrize("key", list(_f2_mappings.keys()))
def test_dict_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == _f2_mappings[key]
@pytest.mark.parametrize("key", ["bogus", 12345, -1.23])
def test_missing_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == key
@pytest.mark.parametrize("x", [1, -1.4, 0])
def test_callable_deprecate_kwarg(x):
with tm.assert_produces_warning(FutureWarning):
assert _f3(old=x) == _f3_mapping(x)
def test_callable_deprecate_kwarg_fail():
msg = "((can only|cannot) concatenate)|(must be str)|(Can't convert)"
with pytest.raises(TypeError, match=msg):
_f3(old="hello")
def test_bad_deprecate_kwarg():
msg = "mapping from old to new argument values must be dict or callable!"
with pytest.raises(TypeError, match=msg):
@deprecate_kwarg("old", "new", 0)
def f4(new=None):
return new
@deprecate_kwarg("old", None)
def _f4(old=True, unchanged=True):
return old, unchanged
@pytest.mark.parametrize("key", ["old", "unchanged"])
def test_deprecate_keyword(key):
x = 9
if key == "old":
klass = FutureWarning
expected = (x, True)
else:
klass = None
expected = (True, x)
with tm.assert_produces_warning(klass):
assert _f4(**{key: x}) == expected
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
dcprojects/CoolProp | Web/scripts/fluid_properties.REFPROPcomparison.py | 3 | 3518 | from __future__ import print_function
import os.path
import CoolProp, CoolProp.CoolProp as CP
import subprocess
import sys
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
root_dir = os.path.abspath(os.path.join(web_dir, '..'))
fluids_path = os.path.join(web_dir,'fluid_properties','fluids')
plots_path = os.path.join(web_dir,'fluid_properties','fluids','REFPROPplots')
template = """
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') # Use a non-GUI backend
import numpy as np, matplotlib.pyplot as plt
import CoolProp
CP = CoolProp.CoolProp
fluid = '{fluid:s}'
fig, ax = plt.subplots()
plt.ylim(10**-18, 10**2)
not_in_REFPROP = False
try:
if CP.get_fluid_param_string(fluid, "REFPROP_name") == 'N/A':
not_in_REFPROP = True
else:
RPfluid = 'REFPROP::' + CP.get_fluid_param_string(fluid, "REFPROP_name")
CAS = CP.get_fluid_param_string(RPfluid, "CAS")
except (RuntimeError,ValueError) as E:
not_in_REFPROP = True
if not_in_REFPROP:
ax.set_xlim(0,1)
xlims = ax.get_xlim()
ylims = ax.get_ylim()
ax.plot([xlims[0],xlims[1]],[ylims[0],ylims[1]],lw = 3,c = 'r')
ax.plot([xlims[0],xlims[1]],[ylims[1],ylims[0]],lw = 3,c = 'r')
x = 0.5
y = (ylims[0]*ylims[1])**0.5
ax.text(x,y,'Not\\nin\\nREFPROP',ha='center',va ='center',bbox = dict(fc = 'white'))
else:
RPfluid = 'REFPROP::' + CP.get_fluid_param_string(fluid, "REFPROP_name")
symbols = ["o", "v", "^", "<", ">","8", "s","p","*","h","H","+","x"]
T = np.min([1.01*CP.PropsSI(fluid, 'Tcrit'),CP.PropsSI(fluid, 'Tmax')])
rhoc = CP.PropsSI(fluid, 'rhomolar_critical')
# Normal properties
rho = np.linspace(1e-10, 2*rhoc)
normalkeys = ['P','V','L','Cpmolar','Cvmolar']
RPdata = CP.PropsSI(normalkeys, 'T', T, 'Dmolar', rho, RPfluid)
CPdata = CP.PropsSI(normalkeys, 'T', T, 'Dmolar', rho, fluid)
for i, key in enumerate(normalkeys):
plt.plot(rho/rhoc, np.abs(RPdata[:,i]/CPdata[:,i]-1)*100, lw = 0, label = key, marker = symbols[i%len(symbols)])
# Special properties
rho = np.linspace(1e-10, 2*rhoc)
keys = ['Hmolar','Smolar']
for i, key in enumerate(keys):
RPdata = CP.PropsSI(key, 'T', T, 'Dmolar', rho, RPfluid) - CP.PropsSI(key, 'T', T, 'Dmolar', 1, RPfluid)
CPdata = CP.PropsSI(key, 'T', T, 'Dmolar', rho, fluid) - CP.PropsSI(key, 'T', T, 'Dmolar', 1, fluid)
plt.plot(rho/rhoc, np.abs(RPdata/CPdata-1)*100, lw = 0, label = key, marker = symbols[(i+len(normalkeys))%len(symbols)])
ax.legend(loc='best', ncol = 2)
plt.xlabel(r'Reduced density [$\\rho/\\rho_c$]')
plt.ylabel(r'Relative deviation $(y_{{CP}}/y_{{RP}}-1)\\times 100$ [%]')
ax.set_yscale('log')
plt.title('Comparison between CoolProp and REFPROP({rpv:s}) along T = 1.01*Tc')
plt.savefig(fluid+'.png', dpi = 100)
plt.savefig(fluid+'.pdf')
plt.close('all')
"""
if not os.path.exists(plots_path):
os.makedirs(plots_path)
with open(os.path.join(plots_path, 'matplotlibrc'), 'w') as fp:
fp.write("backend : agg\n")
for fluid in CoolProp.__fluids__:
print('fluid:', fluid)
file_string = template.format(fluid = fluid, rpv = CP.get_global_param_string("REFPROP_version"))
file_path = os.path.join(plots_path, fluid + '.py')
print('Writing to', file_path)
with open(file_path, 'w') as fp:
fp.write(file_string)
subprocess.check_call('python "' + fluid + '.py"', cwd = plots_path, stdout = sys.stdout, stderr = sys.stderr, shell = True) | mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 20 | 5003 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
# pylint: enable=wildcard-import
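# Note: HAS_PANDAS and HAS_DASK used below appear to be re-exported by the
# wildcard import from learn_io above.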
class IOTest(tf.test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| agpl-3.0 |
TimBizeps/BachelorAP | FP 2018/V51 Operationsverstärker/auswertung/auswertung2.py | 1 | 2529 | import matplotlib as mpl
mpl.use('pgf')
import numpy as np
import scipy.constants as const
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
f, Ua = np.genfromtxt('daten2.txt', unpack=True)
f2, Ua2 = np.genfromtxt('daten22.txt', unpack=True)
# for the fit, take the logarithm of all f and Vstrich values and put them in a regular (linear) plot.
Ue = 258
R1 = 468
RN = 996
Vstricheff = Ua/Ue
Vstricheff2 = Ua2/Ue
Vstrich = Ua[0]/Ue
V = 1/((1/Vstrich)-(R1/RN))
print('V = ', V)
Vstrbvg = Vstrich/np.sqrt(2)
print('Vstrich = ', Vstrich)
#print('Vstricheff = ', Vstricheff)
logVstreff = np.log(Vstricheff)
logf = np.log(f)
logVstreff2 = np.log(Vstricheff2)
logf2 = np.log(f2)
logVstrich = np.log(Vstrich)
logVstrbvg = np.log(Vstrbvg)
#print('logVstreff = ', logVstreff)
#print('logf = ', logf)
#np.savetxt("logdaten2.txt", np.column_stack([logf,logVstreff]))
#np.savetxt("logdaten22.txt", np.column_stack([logf2,logVstreff2]))
print('logVstrbvg = ', logVstrbvg)
def linear(x, m, b):
return m*x+b
def umkehr(y, m, b):
return (y-b)/m
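# umkehr(y, m, b) is the inverse of linear(x, m, b): it solves y = m*x + b for x.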
# fit the relevant values with a linear function
params, cov = curve_fit(linear, logf2, logVstreff2)
errors = np.sqrt(np.diag(cov))
m = params[0]
m_err = errors[0]
b = params[1]
b_err = errors[1]
mmit = ufloat(m, m_err)
bmit = ufloat(b, b_err)
print('m = ', mmit)
print('b = ', bmit)
# call the inverse function with log Vstrbvg => v'g
logvg = umkehr(logVstrbvg, mmit, bmit)
#print('logvg = ', logvg)
vg = unp.exp(logvg)
print('vg = ', vg)
Vstrmalvg = Vstrich*vg
print('Vstrmalvg = ', Vstrmalvg)
l = np.linspace(5.5, 7.5, 1000)
plt.plot(logf, logVstreff, 'rx', label='doppelt logarithmische Messwerte')
plt.plot(logf2, logVstreff2, 'kx', label='für den Fit verwendete Messwerte')
plt.plot(l, linear(l,m,b), 'r-', label='Fit')
plt.xlabel(r'$\log \nu$')
plt.ylabel(r'$\log V^\prime_\text{eff}$')
#plt.xlim(,)
#plt.ylim(100, 400)
plt.axhline(y=logVstrbvg, xmin=0, xmax=1, color='b', ls='-', label=r"$\log\frac{V^\prime}{\sqrt{2}}$", linewidth=1)
plt.axhline(y=logVstrich, xmin=0, xmax=1, color='b', ls='--', label=r"$\log V^\prime$", linewidth=1)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig("plot2.pdf")
# f_g roughly 775 kHz
| gpl-3.0 |
schoolie/bokeh | bokeh/charts/builders/timeseries_builder.py | 6 | 3961 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries chart, which provides a convenient interface for
generating different charts using series-like data by transforming the data
to a consistent format and producing renderers.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from ..builder import create_and_build
from .line_builder import LineBuilder, PointSeriesBuilder
from .step_builder import StepBuilder
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
BUILDER_TYPES = {
'line': LineBuilder,
'step': StepBuilder,
'point': PointSeriesBuilder
}
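# Maps the builder_type strings accepted by TimeSeries() to concrete Builder
# classes; unrecognized values fall through unchanged in the .get() lookup
# below, so a Builder class may also be passed directly.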
def TimeSeries(data=None, x=None, y=None, builder_type=LineBuilder, **kws):
""" Create a timeseries chart using :class:`LineBuilder
<bokeh.charts.builder.line_builder.LineBuilder>` to produce the renderers from
the inputs. The timeseries chart acts as a switchboard to produce charts
for timeseries data with different glyph representations.
Args:
data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
source with columns of data for each series.
x (str or list(str), optional): specifies variable(s) to use for x axis
y (str or list(str), optional): specifies variable(s) to use for y axis
builder_type (str or `Builder`, optional): the type of builder to use to produce
the renderers. Supported options are 'line', 'step', or 'point'.
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import TimeSeries, show, output_file
from bokeh.layouts import column
# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
data = dict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
tsline = TimeSeries(data,
x='Date', y=['IBM', 'MSFT', 'AAPL'],
color=['IBM', 'MSFT', 'AAPL'], dash=['IBM', 'MSFT', 'AAPL'],
title="Timeseries", ylabel='Stock Prices', legend=True)
tspoint = TimeSeries(data,
x='Date', y=['IBM', 'MSFT', 'AAPL'],
color=['IBM', 'MSFT', 'AAPL'], dash=['IBM', 'MSFT', 'AAPL'],
builder_type='point', title="Timeseries Points",
ylabel='Stock Prices', legend=True)
output_file("timeseries.html")
show(column(tsline, tspoint))
"""
builder_type = BUILDER_TYPES.get(builder_type, builder_type)
kws['x'] = x
kws['y'] = y
return create_and_build(builder_type, data, **kws)
| bsd-3-clause |
bikash/pyhsmm | util/testing.py | 1 | 3632 | from __future__ import division
import numpy as np
from numpy import newaxis as na
from matplotlib import pyplot as plt
import stats, general
#########################
# statistical testing #
#########################
### graphical
def populations_eq_quantile_plot(pop1, pop2, fig=None, percentilecutoff=5):
pop1, pop2 = stats.flattendata(pop1), stats.flattendata(pop2)
assert pop1.ndim == pop2.ndim == 1 or \
(pop1.ndim == pop2.ndim == 2 and pop1.shape[1] == pop2.shape[1]), \
'populations must have consistent dimensions'
D = pop1.shape[1] if pop1.ndim == 2 else 1
# we want to have the same number of samples
n1, n2 = pop1.shape[0], pop2.shape[0]
if n1 != n2:
# subsample, since interpolation is dangerous
if n1 < n2:
pop1, pop2 = pop2, pop1
np.random.shuffle(pop1)
pop1 = pop1[:pop2.shape[0]]
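# the shuffle-and-truncate above subsamples the larger population so that both
# populations contribute the same number of points to the quantile plot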
def plot_1d_scaled_quantiles(p1,p2,plot_midline=True):
# scaled quantiles so that multiple calls line up
p1.sort(), p2.sort() # NOTE: destructive! but that's cool
xmin,xmax = general.scoreatpercentile(p1,percentilecutoff), \
general.scoreatpercentile(p1,100-percentilecutoff)
ymin,ymax = general.scoreatpercentile(p2,percentilecutoff), \
general.scoreatpercentile(p2,100-percentilecutoff)
plt.plot((p1-xmin)/(xmax-xmin),(p2-ymin)/(ymax-ymin))
if plot_midline:
plt.plot((0,1),(0,1),'k--')
plt.axis((0,1,0,1))
if D == 1:
if fig is None:
plt.figure()
plot_1d_scaled_quantiles(pop1,pop2)
else:
if fig is None:
fig = plt.figure()
if not hasattr(fig,'_quantile_test_projs'):
firsttime = True
randprojs = np.random.randn(D,D)
randprojs /= np.sqrt(np.sum(randprojs**2,axis=1))[:,na]
projs = np.vstack((np.eye(D),randprojs))
fig._quantile_test_projs = projs
else:
firsttime = False
projs = fig._quantile_test_projs
ims1, ims2 = pop1.dot(projs.T), pop2.dot(projs.T)
for i, (im1, im2) in enumerate(zip(ims1.T,ims2.T)):
plt.subplot(2,D,i)
plot_1d_scaled_quantiles(im1,im2,plot_midline=firsttime)
### numerical
# NOTE: a random numerical test should be repeated at the OUTERMOST loop (with
# exception catching) to see if its failures exceed the number expected
# according to the specified pvalue (tests could be repeated via sample
# bootstrapping inside the test, but that doesn't work reliably and random tests
# should have no problem generating new randomness!)
def assert_populations_eq(pop1, pop2):
assert_populations_eq_moments(pop1,pop2) and \
assert_populations_eq_komolgorofsmirnov(pop1,pop2)
def assert_populations_eq_moments(pop1, pop2, **kwargs):
# just first two moments implemented; others are hard to estimate anyway!
assert_populations_eq_means(pop1,pop2,**kwargs) and \
assert_populations_eq_variances(pop1,pop2,**kwargs)
def assert_populations_eq_means(pop1, pop2, pval=0.05, msg=None):
_,p = stats.two_sample_t_statistic(pop1,pop2)
if np.any(p < pval):
raise AssertionError(msg or "population means might be different at %0.3f" % pval)
def assert_populations_eq_variances(pop1, pop2, pval=0.05, msg=None):
_,p = stats.f_statistic(pop1, pop2)
if np.any(p < pval):
raise AssertionError(msg or "population variances might be different at %0.3f" % pval)
def assert_populations_eq_komolgorofsmirnov(pop1, pop2, msg=None):
raise NotImplementedError # TODO
| mit |
nck0405/MyOwn | modules/tests/smoke/broken_links.py | 16 | 26739 | """ Sahana Eden Test Framework
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from time import time
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
import sys
import socket
from tests.web2unittest import Web2UnitTest
from gluon import current
try:
from twill import get_browser
from twill import set_output
from twill.browser import *
except ImportError:
raise NameError("Twill not installed")
try:
import mechanize
#from mechanize import BrowserStateError
#from mechanize import ControlNotFoundError
except ImportError:
raise NameError("Mechanize not installed")
class BrokenLinkTest(Web2UnitTest):
""" Smoke Test, visit every link it can find and report on the outcome """
def __init__(self):
Web2UnitTest.__init__(self)
self.b = get_browser()
self.b_data = StringIO()
set_output(self.b_data)
self.clearRecord()
# This string must exist in the URL for it to be followed
# Useful to avoid going to linked sites
self.homeURL = self.url
# Link used to identify a URL to a ticket
self.url_ticket = "/admin/default/ticket/"
# Tuple of strings that if in the URL will be ignored
# Useful to avoid dynamic URLs that trigger the same functionality
self.include_ignore = ("_language=",
"logout",
"appadmin",
"admin",
"delete",
)
# tuple of strings that should be removed from the URL before storing
# Typically this will be some variables passed in via the URL
self.strip_url = ("?_next=",
)
self.reportOnly = False
self.maxDepth = 16 # sanity check
self.setThreshold(10)
self.setUser("[email protected]/eden")
self.total_visited = 0
self.broken_links_count = 0
def clearRecord(self):
# the total url links visited
self.totalLinks = 0
# The number of unique urls found at depth i, where i is the index
self.linkDepth = []
# Dictionary of the parent for each URL
self.urlParentList = {}
# dictionary of ReportData objects indexed on the url
self.results = {}
def setReportOnly(self, action):
self.reportOnly = action
def setDepth(self, depth):
self.maxDepth = depth
def setUser(self, user):
self.credentials = user.split(",")
def setThreshold(self, value):
value = float(value)
self.threshold = value
# socket.setdefaulttimeout(value*2)
def login(self, credentials):
if credentials == "UNAUTHENTICATED":
url = "%s/default/user/logout" % self.homeURL
self.b.go(url)
return True
try:
(self.user, self.password) = credentials.split("/",1)
except:
msg = "Unable to split %s into a user name and password" % user
self.reporter(msg)
return False
url = "%s/default/user/login" % self.homeURL
self.b.go(url)
forms = self.b.get_all_forms()
for form in forms:
try:
if form["_formname"] == "login":
self.b._browser.form = form
form["email"] = self.user
form["password"] = self.password
self.b.submit("Login")
# If login is successful then should be redirected to the homepage
return self.b.get_url()[len(self.homeURL):] == "/default/index"
except:
# This should be a mechanize.ControlNotFoundError, but
# for some unknown reason that isn't caught on Windows or Mac
pass
return False
def addResults2Current(self):
'''
Store the link counts in gluon.current to be used by HTMLTestRunner for better reporting
'''
smoke_results = {}
smoke_results['working_links'] = self.total_visited - self.broken_links_count
smoke_results['broken_links_count'] = self.broken_links_count
current.data['smoke_results'] = smoke_results
def runTest(self):
"""
Test to find all exposed links and check the http code returned.
This test doesn't run any javascript so some false positives
will be found.
The test can also display an histogram depicting the number of
links found at each depth.
Whether failure or success is shown in the report is determined in addSuccess in the
TestResult class
"""
for user in self.credentials:
self.clearRecord()
if self.login(user):
self.reporter("Smoke Test for user %s" % self.user)
self.visitLinks()
self.report()
self.addResults2Current()
else:
raise Exception("Login Failed")
def visitLinks(self):
url = self.homeURL + "/default/index"
to_visit = [url]
start = time()
self.total_visited = 0
if not self.reportOnly:
for depth in range(self.maxDepth):
if len(to_visit) == 0:
break
self.linkDepth.append(len(to_visit))
self.totalLinks += len(to_visit)
visit_start = time()
url_visited = "%d urls" % len(to_visit)
self.total_visited += len(to_visit)
to_visit = self.visit(to_visit, depth)
msg = "%.2d Visited %s in %.3f seconds, %d more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
self.reporter(msg)
if self.config.verbose >= 2:
if self.config.verbose >= 3:
print >> self.stdout
if self.stdout.isatty(): # terminal should support colour
msg = "%.2d Visited \033[1;32m%s\033[0m in %.3f seconds, \033[1;31m%d\033[0m more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
print >> self.stdout, msg
if len(to_visit) > 0:
self.linkDepth.append(len(to_visit))
finish = time()
self.reporter("Finished took %.3f seconds" % (finish - start))
def visit(self, url_list, depth):
repr_list = [".pdf", ".xls", ".rss", ".kml"]
to_visit = []
record_data = self.config.verbose > 0
for visited_url in url_list:
index_url = visited_url[len(self.homeURL):]
if record_data:
if index_url in self.results.keys():
print >> self.stdout, "Warning duplicated url: %s" % index_url
self.results[index_url] = ReportData()
current_results = self.results[index_url]
current_results.depth = depth
# Find out if the page can be visited
open_novisit = False
for repr in repr_list:
if repr in index_url:
open_novisit = True
break
try:
if open_novisit:
action = "open_novisit"
else:
action = "open"
visit_start = time()
self.b._journey(action, visited_url)
http_code = self.b.get_code()
duration = time() - visit_start
if record_data:
current_results.duration = duration
if duration > self.threshold:
if self.config.verbose >= 3:
print >> self.stdout, "%s took %.3f seconds" % (visited_url, duration)
except Exception as e:
duration = time() - visit_start
import traceback
print traceback.format_exc()
if record_data:
current_results.broken = True
current_results.exception = True
current_results.duration = duration
continue
http_code = self.b.get_code()
if http_code != 200:
if record_data:
current_results.broken = True
current_results.http_code = http_code
elif open_novisit:
continue
links = []
try:
if self.b._browser.viewing_html():
links = self.b._browser.links()
else:
continue
except Exception as e:
import traceback
print traceback.format_exc()
if record_data:
current_results.broken = True
current_results.exception = True
continue
for link in (links):
url = link.absolute_url
if url.find(self.url_ticket) != -1:
# A ticket was raised so...
# capture the details and add to brokenLinks
if record_data:
current_results.broken = True
current_results.ticket = url
break # no need to check any other links on this page
if url.find(self.homeURL) == -1:
continue
ignore_link = False
for ignore in self.include_ignore:
if url.find(ignore) != -1:
ignore_link = True
break
if ignore_link:
continue
for strip in self.strip_url:
location = url.find(strip)
if location != -1:
url = url[0:location]
short_url = url[len(self.homeURL):]
if url not in url_list and \
short_url != "" and \
short_url not in self.results.keys() and \
url not in to_visit:
self.urlParentList[short_url] = index_url
to_visit.append(url)
return to_visit
def report(self):
self.reporter("%d URLs visited" % self.totalLinks)
self.brokenReport()
self.timeReport()
if self.config.record_timings:
if not self.reportOnly:
self.record_timings()
self.scatterplot()
self.depthReport()
def record_timings(self):
import_error = ""
try:
import xlrd
except:
import_error += "ERROR: the xlrd modules is needed to record timings\n"
try:
import xlwt
except:
import_error += "ERROR: the xlwt modules is needed to record timings\n"
if import_error != "":
print >> self.stderr, import_error
return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
workbook = None
summary = {}
if workbook:
summary = self.read_timings_sheet(workbook)
if len(summary["date"]) > 100:
# Need to rotate the file
# 1) make a summary and save this
self.report_timings_summary(summary, rec_time_filename)
# 2) archive the file
from zipfile import ZipFile
import os
zip_filename = os.path.join(self.config.path, "rec_time.zip")
archive = ZipFile(zip_filename, "a")
arc_name = "%s-%s.xls" % (rec_time_filename[len(self.config.path):-4],
current.request.now.date()
)
archive.write(rec_time_filename,arc_name)
archive.close()
# 3) clear the current file
os.unlink(rec_time_filename)
summary = {}
if "date" not in summary:
last_col = 0
summary["date"] = [current.request.now.date()]
else:
last_col = len(summary["date"])
summary["date"].append(current.request.now.date())
for (url, rd_obj) in self.results.items():
if url not in summary:
summary[url] = []
# ensure that the row is as long as the number of dates
shortage = last_col - len(summary[url])
if shortage > 0:
summary[url] = summary[url] + ['']*shortage
summary[url].append((rd_obj.get_duration(), rd_obj.is_broken()))
self.write_timings_sheet(summary, rec_time_filename)
def read_timings_sheet(self, workbook):
"""
This will extract all the details from the xls sheet
"""
sheet = workbook.sheet_by_name("Timings")
summary = {}
RED = 0x0A
num_cells = sheet.ncols
summary["date"] = []
for col in range(1, num_cells):
summary["date"].append(sheet.cell_value(0, col))
for row in range(1,sheet.nrows):
url = sheet.cell_value(row, 0)
summary[url] = []
for col in range(1, num_cells):
duration = sheet.cell_value(row, col)
xf = sheet.cell_xf_index(row, col)
bg = workbook.xf_list[xf].background
broken = (bg.pattern_colour_index == RED)
summary[url].append((duration, broken))
return summary
def write_timings_sheet(self, summary, filename=None):
import xlwt
RED = 0x0A
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Timings")
stylebroken = xlwt.XFStyle()
stylebroken.pattern.pattern = stylebroken.pattern.SOLID_PATTERN
stylebroken.pattern.pattern_fore_colour = RED
col = 1
for date in summary["date"]:
sheet.write(0,col,str(date))
col += 1
row = 1
for (url, results) in summary.items():
if url == "date":
continue
sheet.write(row,0,url)
col = 1
for data in results:
if len(data) == 2 and data[1]:
sheet.write(row,col,data[0],stylebroken)
elif len(data) > 0:
sheet.write(row,col,data[0])
col += 1
row += 1
if filename:
book.save(filename)
return book
def report_timings_summary(self,
summary,
summary_file_name = None,
mean_threshold = 1):
"""
This will extract the details from the sheet and optionally save
them to a summary file
summary: the summary details returned from the spreadsheet (read_timings_sheet)
summary_file_name: name of the file to record the summary details (if required)
mean_threshold: The minimum number of values required to include
the mean in the regression calculations
"""
import numpy
import datetime
good_values = []
other_values = []
total_values = []
for date in summary["date"]:
good_values.append([])
other_values.append([])
total_values.append([])
for (url,results) in summary.items():
if url == "date":
continue
else:
cnt = 0
for (duration, broken) in results:
if duration != "":
total_values[cnt].append(duration)
if broken:
other_values[cnt].append(duration)
else:
good_values[cnt].append(duration)
cnt += 1
# get the number of days each entry is after the first date
# and calculate the average, if the average is NAN then ignore both
date_summary = []
gv_mean = []
gv_std = []
gv_date = []
cnt = 0
start = datetime.datetime.strptime(summary["date"][0],"%Y-%m-%d")
for list in good_values:
if len(list) > mean_threshold:
mean = numpy.mean(list)
std = numpy.std(list)
if not numpy.isnan(mean):
this_date = datetime.datetime.strptime(summary["date"][cnt],"%Y-%m-%d")
date_summary.append((this_date - start).days)
gv_mean.append(mean)
gv_std.append(std)
gv_date.append(summary["date"][cnt])
cnt += 1
# calculate the regression line
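# (numpy.polyfit with degree 1 gives the slope m and intercept b of the
# least-squares line of mean link time against days since the first run)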
if len(gv_mean) > 2:
(m,b) = numpy.polyfit(date_summary, gv_mean, 1)
else:
m = b = 0
if summary_file_name != None:
book = self.write_timings_sheet(summary)
sheet = book.add_sheet("summary")
row = 0
for date in gv_date:
sheet.write(row,0,str(date))
sheet.write(row,1,gv_mean[row])
row += 1
sheet.write(row,0,"Trend")
sheet.write(row,1,m)
# Save the details to the summary file
book.save(summary_file_name)
return (date_summary, gv_mean, gv_std, m, b)
def report_model_url(self):
print "Report breakdown by module"
for (model, value) in self.model_url.items():
print model
for ud in value:
url = ud[0]
depth = ud[1]
parent = ud[2]
tabs = "\t" * depth
print "%s %s-%s (parent url - %s)" % (tabs, depth, url, parent)
def brokenReport(self):
self.reporter("Broken Links")
as_html = current.test_config.html
self.broken_links_count = 0
for (url, rd_obj) in self.results.items():
if as_html:
print_url = "<a href=%s%s target=\"_blank\">%s</a>" % (self.homeURL, url, url)
else:
print_url = url
if rd_obj.is_broken():
if rd_obj.threw_exception():
msg = "(Exception) %s" % print_url
else:
http_code = rd_obj.return_http_code()
ticket = rd_obj.the_ticket(as_html)
try:
parent = self.urlParentList[url]
if as_html:
parent = "<a href=%s%s target=\"_blank\">Parent</a>" % (self.homeURL, parent)
except:
parent = "unknown"
msg = "%3d. (%s - %s) %s called from %s" % (self.broken_links_count + 1,
http_code,
ticket,
print_url,
parent
)
self.reporter(msg)
self.broken_links_count += 1
def timeReport(self):
from operator import itemgetter
import numpy
thresholdLink = {}
linktimes = []
for (url, rd_obj) in self.results.items():
duration = rd_obj.get_duration()
linktimes.append(duration)
if duration > self.threshold:
thresholdLink[url] = duration
self.reporter("Time Analysis - Links beyond threshold")
for (visited_url, duration) in sorted(thresholdLink.iteritems(),
key=itemgetter(1),
reverse=True):
self.reporter( "%s took %.3f seconds" % (visited_url, duration))
self.reporter("Time Analysis - summary")
total = len(linktimes)
average = numpy.mean(linktimes)
std = numpy.std(linktimes)
msg = "%s links visited with an average time of %.3f and standard deviation of %.3f" % (total, average, std)
self.reporter(msg)
def scatterplot(self):
"""
Method to draw a scatterplot of the average time to download links
against time. Add a regression line to show the trend over time.
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
import numpy
except ImportError:
return
try:
import xlrd
except:
import_error += "ERROR: the xlrd modules is needed to record timings\n"
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
return
import numpy
# Only include the mean in the regression values if there are at least 10 URL timings
summary = self.read_timings_sheet(workbook)
(date_summary, gv_mean, gv_std, m, b) = self.report_timings_summary(summary, mean_threshold=10)
if len(gv_mean) <= 2:
return
fig = Figure(figsize=(5, 2.5))
canvas = self.FigureCanvas(fig)
ax = fig.add_subplot(111)
linear = numpy.poly1d([m,b])
denom = numpy.max(gv_std)/50
size = gv_std/denom
ax.scatter(date_summary, gv_mean, marker="d", s=size)
ax.plot(date_summary, linear(date_summary), '--r')
chart = StringIO()
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter("Scatterplot of average link times per successful run")
self.reporter(image)
self.reporter("The trend line has a current slope of %s" % m)
self.reporter("The y-intercept is %s seconds" % b)
def depthReport(self):
"""
Method to draw a histogram of the number of new links
discovered at each depth.
(i.e. show how many links are required to reach a link)
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
from numpy import arange
except ImportError:
return
self.reporter("Analysis of link depth")
fig = Figure(figsize=(4, 2.5))
# Draw a histogram
width = 0.9
rect = [0.12, 0.08, 0.9, 0.85]
ax = fig.add_axes(rect)
left = arange(len(self.linkDepth))
plot = ax.bar(left, self.linkDepth, width=width)
# Add the x axis labels
ax.set_xticks(left+(width*0.5))
ax.set_xticklabels(left)
chart = StringIO()
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter(image)
class ReportData():
"""
Class to hold the data collected from the smoke test ready for reporting
Instances of this class will be held in the dictionary results which will
be keyed on the url. This way, in an attempt to minimise the memory used,
the url doesn't need to be stored in this class.
The class will have the following properties
broken: boolean
exception: boolean
http_code: integer
ticket: URL of any ticket linked with this url
parent: the parent URL of this url
depth: how deep is this url
duration: how long did it take to get the url
"""
def is_broken(self):
if hasattr(self, "broken"):
return self.broken
return False
def threw_exception(self):
if hasattr(self, "exception"):
return self.exception
return False
def return_http_code(self):
if hasattr(self, "http_code"):
return self.http_code
return "-"
def the_ticket(self, html):
"""
Should only have a ticket if it is broken,
but won't always have a ticket to display.
"""
if hasattr(self, "ticket"):
if html:
return "<a href=%s target=\"_blank\">Ticket</a>" % (self.ticket)
else:
return "Ticket: %s" % (self.ticket)
return "no ticket"
def get_parent(self):
if hasattr(self, "parent"):
return self.parent
return ""
def get_depth(self):
if hasattr(self, "depth"):
return self.depth
return 0
def get_duration(self):
if hasattr(self, "duration"):
return self.duration
return 0
| mit |
hitszxp/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
draperjames/bokeh | examples/howto/interactive_bubble/gapminder.py | 3 | 4218 | import io
from jinja2 import Template
import pandas as pd
from bokeh.core.properties import field
from bokeh.embed import file_html
from bokeh.layouts import column
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d, LinearAxis, HoverTool, Text,
SingleIntervalTicker, CustomJS, Slider, CategoricalColorMapper, Legend,
LegendItem,
)
from bokeh.models.annotations import Title
from bokeh.palettes import Spectral6
from bokeh.resources import JSResources
from bokeh.util.browser import view
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()
sources = {}
region_name = regions_df.Group
region_name.name = 'region'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_name], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
title=Title(text=''),
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
min_border=20,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(ticker=SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# ### Add the background year text
# We add this first so it is below all the other glyphs
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color={'field': 'region', 'transform': color_mapper},
fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
plot.add_layout(Legend(items=[LegendItem(label=field('region'), renderers=[circle_renderer])]))
# Add the slider
code = """
var year = slider.value,
sources = %s,
new_source_data = sources[year].data;
renderer_source.data = new_source_data;
text_source.data = {'year': [String(year)]};
""" % js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback, name='testy')
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
layout = column(plot, slider)
# Open our custom template
with open('gapminder_template.jinja', 'r') as f:
template = Template(f.read())
# Use inline resources, render the html and open
js_resources = JSResources(mode='inline')
title = "Bokeh - Gapminder Bubble Plot"
html = file_html(layout, resources=(js_resources, None), title=title, template=template)
output_file = 'gapminder.html'
with io.open(output_file, mode='w', encoding='utf-8') as f:
f.write(html)
view(output_file)
| bsd-3-clause |
scotgl/sonify | ver_dev/pichart.py | 6 | 1425 | from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display
from gtts import gTTS
import os
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import pandas as pd
import ctcsound
pan = 0
index = 10
cs = ctcsound.Csound()
csd = '''
<CsoundSynthesizer>
<CsOptions>
-odac -d
</CsOptions>
<CsInstruments>
sr = 44100
ksmps = 32
nchnls = 2
0dbfs = 1
instr 1
;aMod1 poscil 200, 700, 1
aMod1 poscil p4, p5, 1 ; p4 = amp1, p5 = f1, p6 = amp2, p7 = f2
;aMod2 poscil 1800, 290, 1
aMod2 poscil p6, p7, 1
kenv linen p9 , 0.3 , p3, p9
aSig poscil kenv, 440+aMod1+aMod2, 1
outs aSig*(1-p8), aSig*p8
endin
</CsInstruments>
<CsScore>
f 0 14400
f 1 0 1024 10 1
</CsScore>
</CsoundSynthesizer>
'''
cs.compileCsdText(csd)
cs.start()
pt = ctcsound.CsoundPerformanceThread(cs.csound())
pt.play()
def pan_sonify(percentage):
global index
index = percentage
in_min = 10
in_max = 100
out_min=690
out_max = 710
tts = gTTS(text=(str(index)+'percent'), lang='en')
tts.save("num.mp3")
os.system("afplay num.mp3")
if (index>50):
pan = 1
if (index<50):
pan = 0
if (index==50):
pan = 0.5
freq = (index - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
print(freq)
pt.scoreEvent(False, 'i', (1, 0, 4, 200, 700, 200, freq, pan, 0.5))
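# Hedged sketch (not part of the original script): the frequency used above is
# a plain linear interpolation of the percentage from [10, 100] onto
# [690, 710] Hz. The helper below reproduces only that arithmetic so it can be
# checked without Csound or gTTS installed; the name _map_range is made up.
def _map_range(value, in_min=10, in_max=100, out_min=690, out_max=710):
    """Linearly map value from [in_min, in_max] onto [out_min, out_max]."""
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

# For example, _map_range(50) is roughly 698.9 Hz (paired with a centred pan
# of 0.5 above), while _map_range(100) reaches the top of the range at 710 Hz.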
| gpl-3.0 |
awohns/selection | python_lib/lib/python3.4/site-packages/numpy/lib/npyio.py | 23 | 73862 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if is_pathlib_path(file):
file = str(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
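# Hedged illustration (not part of the original source): `zipfile_factory`
# simply normalises pathlib.Path arguments to strings and forces Zip64 on,
# so both of the following are expected to behave like plain zipfile.ZipFile
# calls (the archive name is made up):
#
#     zipfile_factory('archive.npz', mode='r')
#     zipfile_factory(pathlib.Path('archive.npz'), mode='w')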
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif is_pathlib_path(file):
fid = file.open("rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
elif is_pathlib_path(file):
if not file.name.endswith('.npy'):
file = file.parent / (file.name + '.npy')
fid = file.open("wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
elif is_pathlib_path(file):
if not file.name.endswith('.npz'):
file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # normalise case so b'0X...' hex prefixes are recognised below
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
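# Hedged illustration (not part of the original source): `_getconv` maps a
# dtype to the callable used to parse a single raw text field, for example
#
#     >>> _getconv(np.dtype(np.int32))(b'7.0')    # integers go through float()
#     7
#     >>> _getconv(np.dtype(float))(b'0x1.8p1')   # hex float notation
#     3.0
#     >>> _getconv(np.dtype(np.bool_))(b'0')
#     False
#
# `loadtxt` below builds one such converter per column before parsing rows.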
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionadded:: 1.11.0
Also when a single column has to be read it is possible to use
        an integer instead of a tuple. E.g. ``usecols = 3`` reads the
        fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
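    # Hedged illustration (not part of the original source): for a simple
    # structured dtype the two helpers above cooperate roughly like this:
    #
    #     dt = np.dtype([('x', float), ('y', float), ('z', float)])
    #     flatten_dtype(dt)      # ([float64, float64, float64],
    #                            #  [(1, None), (1, None), (1, None)])
    #     pack_items([1.0, 2.0, 3.0], [(1, None), (1, None), (1, None)])
    #                            # (1.0, 2.0, 3.0)
    #
    # i.e. one converter is created per flattened column and the converted
    # values are re-packed into the nesting the structured dtype expects.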
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
    # Make sure `names` is a list (needed for Python 2.5 compatibility)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
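    # The user may supply missing_values as a dict keyed by column name or
    # index, as a sequence with one entry per column, or as a single string
    # applied to every column; the branches below normalize each of these
    # forms into one list of missing-value strings per column.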
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
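# Illustrative usage sketch (not part of the module; 'data.csv' is a
# hypothetical file). With the defaults set above,
#     rec = recfromcsv('data.csv')
# behaves roughly like
#     rec = genfromtxt('data.csv', delimiter=',', names=True, dtype=None,
#                      case_sensitive='lower').view(np.recarray)
# while passing usemask=True returns a MaskedRecords view instead.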
| mit |
math4youbyusgroupillinois/givinggraph | setup.py | 3 | 1147 | from setuptools import setup, command
import os
import sys
import urllib
import tarfile
'''setuptools works by triggering subcommands from higher level commands.
The default commands 'install' and 'develop' trigger the following sequences:
install:
1. build
2. build_py
3. install_lib
4. install_egg_info
5. egg_info
6. install_scripts
develop:
1. egg_info
2. build_ext
'''
setup(
name='givinggraph',
version='0.0.1',
packages=['givinggraph'],
install_requires=[
'BeautifulSoup',
'celery',
'flask',
'gensim',
'goose-extractor', # see dependency link since not in pypi
# 'matplotlib', # ignoring for now to see if travis stops complaining
'networkx',
'sqlalchemy',
'scikit-learn',
],
dependency_links=[
'http://github.com/grangier/python-goose/tarball/master/#egg=goose-extractor-1.0.1'
],
entry_points={
'console_scripts': [
'givinggraph-api = givinggraph.api.app:main',
],
},
tests_require=[
'nose',
'pep8',
'pyflakes',
],
test_suite='tests',
)
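# Typical invocations for this script (standard setuptools usage, listed only
# as a sketch of how the command sequences above are triggered):
#   python setup.py develop   # editable install: egg_info, then build_ext
#   python setup.py install   # full install: build -> build_py -> install_lib -> ...
#   python setup.py test      # run the 'tests' suite declared in test_suite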
| mit |
CASIANICA/brainDecodingToolbox | braincode/prf/vim1_pro.py | 1 | 32785 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import numpy as np
import tables
import bob.ip.gabor
from joblib import Parallel, delayed
from sklearn import linear_model
from braincode.util import configParser
from braincode.math import make_2d_gaussian, ridge
from braincode.math.norm import zscore
from braincode.prf import dataio
from braincode.prf import util as vutil
from braincode.pipeline import retinotopy
def check_path(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path, 0755)
def get_gabor_features(img):
"""Get Gabor features from input image."""
img = img.astype(np.float64)
gwt = bob.ip.gabor.Transform(number_of_scales=9)
trafo_img = gwt(img)
out_feat = np.zeros((72, 500, 500))
for i in range(trafo_img.shape[0]):
real_p = np.real(trafo_img[i, ...])
imag_p = np.imag(trafo_img[i, ...])
out_feat[i, ...] = np.sqrt(np.square(real_p)+np.square(imag_p))
return out_feat
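# Shape sketch: Transform(number_of_scales=9) with bob's default of 8
# orientations (implied by the 72-channel output above) gives 9 * 8 = 72
# wavelets, so one 500x500 image maps to a (72, 500, 500) array holding the
# magnitude of each wavelet response.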
def get_stim_features(db_dir, feat_dir, data_type):
"""Stimuli processing."""
sti_dir = os.path.join(db_dir, 'stimuli')
prefix = {'train': 'Stimuli_Trn_FullRes', 'val': 'Stimuli_Val_FullRes'}
if data_type == 'train':
for i in range(15):
mat_file = os.path.join(sti_dir, prefix['train']+'_%02d.mat'%(i+1))
print 'Load file %s ...'%(mat_file)
tf = tables.open_file(mat_file)
imgs = tf.get_node('/stimTrn')[:]
tf.close()
# output matrix: image-number x channel x row x col
print 'image size %s'%(imgs.shape[2])
out_features = np.zeros((imgs.shape[2], 72, 500, 500))
for j in range(imgs.shape[2]):
x = imgs[..., j].T
out_features[j, ...] = get_gabor_features(x)
out_file = prefix['train']+'_%02d_gabor_features.npy'%(i+1)
out_file = os.path.join(feat_dir, out_file)
np.save(out_file, out_features)
else:
mat_file = os.path.join(sti_dir, prefix['val']+'.mat')
print 'Load file %s ...'%(mat_file)
tf = tables.open_file(mat_file)
imgs = tf.get_node('/stimVal')[:]
# output matrix: image-number x channel x row x col
out_features = np.zeros((imgs.shape[2], 72, 500, 500))
for j in range(imgs.shape[2]):
x = imgs[..., j].T
out_features[j, ...] = get_gabor_features(x)
out_file = prefix['val']+'_gabor_features.npy'
out_file = os.path.join(feat_dir, out_file)
np.save(out_file, out_features)
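# Output layout sketch: each saved *_gabor_features.npy array has shape
# (n_images, 72, 500, 500), i.e. image index x Gabor channel x row x col,
# matching the "image-number x channel x row x col" comments above.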
def get_candidate_model(feat_dir, data_type):
"""Get gaussian kernel based on receptivefield features."""
# derived gauusian-kernel based features
# candidate pooling centers are spaces 0.4 degrees apart (10 pixels)
# candidate pooling fields included 17 radii (1, 5, 10, 15, 20, ...,
# 55, 60, 70, 80, 90, 100 pixels) between 0.04 degree (1 pixel) and 4
# degree (100 pixels)
prefix = {'train': 'Stimuli_Trn_FullRes', 'val': 'Stimuli_Val_FullRes'}
if data_type=='train':
time_count = 0
for i in range(15):
tmp_file = os.path.join(feat_dir,
prefix['train']+'_%02d_gabor_features.npy'%(i+1))
tmp = np.load(tmp_file, mmap_mode='r')
time_count += tmp.shape[0]
out_file = os.path.join(feat_dir,
'train_candidate_model_%02d.npy'%(i+1))
cand_model = np.memmap(out_file, dtype='float16', mode='w+',
shape=(50*50*17, tmp.shape[0], 72))
Parallel(n_jobs=4)(delayed(model_pro)(tmp, cand_model, xi, yi, si)
for si in range(17) for xi in range(50) for yi in range(50))
# save memmap object as a numpy.array
model_array = np.array(cand_model)
np.save(out_file, model_array)
# merge parts
print 'Time series length: %s'%(time_count)
out_file = os.path.join(feat_dir, 'train_candidate_model.npy')
cand_model = np.memmap(out_file, dtype='float16', mode='w+',
shape=(50*50*17, time_count, 72))
c = 0
for i in range(15):
pf = os.path.join(feat_dir, 'train_candidate_model_%02d.npy'%(i+1))
data = np.load(pf)
cand_model[:, c:(c+data.shape[1]), :] = data
c += data.shape[1]
# save memmap object as a numpy.array
model_array = np.array(cand_model)
np.save(out_file, model_array)
else:
tmp_file = os.path.join(feat_dir, prefix['val']+'_gabor_features.npy')
tmp = np.load(tmp_file, mmap_mode='r')
time_count = tmp.shape[0]
out_file = os.path.join(feat_dir, '%s_candidate_model.npy'%(data_type))
cand_model = np.memmap(out_file, dtype='float16', mode='w+',
shape=(50*50*17, time_count, 72))
Parallel(n_jobs=4)(delayed(model_pro)(tmp, cand_model, xi, yi, si)
for si in range(17) for xi in range(50) for yi in range(50))
# save memmap object as a numpy.array
model_array = np.array(cand_model)
np.save(out_file, model_array)
def model_pro(feat, cand_model, xi, yi, si):
"""Sugar function for generating candidate model."""
mi = si*50*50 + xi*50 + yi
center_x = np.arange(5, 500, 10)
center_y = np.arange(5, 500, 10)
sigma = [1] + [n*5 for n in range(1, 13)] + [70, 80, 90, 100]
x0 = center_x[xi]
y0 = center_y[yi]
s = sigma[si]
print 'Model %s : center - (%s, %s), sigma %s'%(mi, y0, x0, s)
kernel = make_2d_gaussian(500, s, center=(x0, y0))
kernel = kernel.flatten()
idx_head = 0
parts = feat.shape[0] / 10
for i in range(parts):
tmp = feat[(i*10):(i*10+10), ...]
tmp = tmp.reshape(720, 250000)
res = tmp.dot(kernel).astype(np.float16)
cand_model[mi, idx_head:(idx_head+10), ...] = res.reshape(10, 72)
idx_head += 10
def get_candidate_model_new(db_dir, data_type):
"""Get gaussian kernel based on receptivefield features."""
# derived gauusian-kernel based features
# candidate pooling centers are spaces 0.4 degrees apart (5 pixels)
# candidate pooling fields included 10 radii (2, 4, 8, 16, 32, 60,
# 70, 80, 90, 100 pixels) between 0.16 degree (0.08 per pixel) and 8
# degree (100 pixels)
img_num = {'train': 1750, 'val': 120}
feat_file = os.path.join(db_dir, data_type+'_gabor_feat.memdat')
feat = np.memmap(feat_file, dtype='float32', mode='r',
shape=(img_num[data_type], 250, 250, 72))
out_file = os.path.join(db_dir, '%s_candidate_model.npy'%(data_type))
cand_model = np.memmap(out_file, dtype='float16', mode='w+',
shape=(50*50*10, img_num[data_type], 72))
Parallel(n_jobs=4)(delayed(model_pro_new)(feat, cand_model, xi, yi, si)
for si in range(10) for xi in range(50) for yi in range(50))
# save memmap object as a numpy.array
model_array = np.array(cand_model)
np.save(out_file, model_array)
def model_pro_new(feat, cand_model, xi, yi, si):
"""Sugar function for generating candidate model."""
mi = si*50*50 + xi*50 + yi
center_x = np.arange(2, 250, 5)
center_y = np.arange(2, 250, 5)
sigma = [2, 4, 8, 16, 32, 60, 70, 80, 90, 100]
x0 = center_x[xi]
y0 = center_y[yi]
s = sigma[si]
print 'Model %s : center - (%s, %s), sigma %s'%(mi, y0, x0, s)
kernel = make_2d_gaussian(250, s, center=(x0, y0))
kernel = kernel.flatten()
idx_head = 0
parts = feat.shape[0] / 10
for i in range(parts):
tmp = feat[(i*10):(i*10+10), ...]
tmp = np.transpose(tmp, (0, 3, 1, 2))
tmp = tmp.reshape(720, 62500)
res = tmp.dot(kernel).astype(np.float16)
cand_model[mi, idx_head:(idx_head+10), ...] = res.reshape(10, 72)
idx_head += 10
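# A small helper sketch (hypothetical; not called by the pipeline) that
# mirrors the index arithmetic of model_pro_new and of the model-selection
# code below: it decodes a flat candidate-model index into the pooling-field
# center (row, col) in pixels and its sigma.
def decode_candidate_index(mi, n_centers=50, step=5, offset=2,
                           sigma=(2, 4, 8, 16, 32, 60, 70, 80, 90, 100)):
    """Return (y0, x0, sigma) in pixels for a flat candidate-model index."""
    si = mi // (n_centers * n_centers)
    xi = (mi % (n_centers * n_centers)) // n_centers
    yi = (mi % (n_centers * n_centers)) % n_centers
    return offset + step * yi, offset + step * xi, sigma[si]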
def get_vxl_idx(prf_dir, db_dir, subj_id, roi):
"""Get voxel index in specific ROI"""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
print 'Voxel number: %s'%(len(vxl_idx))
roi_dir = os.path.join(prf_dir, roi)
check_path(roi_dir)
np.save(os.path.join(roi_dir, 'vxl_idx.npy'), vxl_idx)
def ridge_fitting(feat_dir, prf_dir, db_dir, subj_id, roi):
"""pRF model fitting using ridge regression.
    90% of the training data is used for model tuning, and the remaining 10%
    is used for model selection.
"""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del val_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load candidate models
train_models = np.load(os.path.join(feat_dir, 'train_candidate_model.npy'),
mmap_mode='r')
# output directory config
roi_dir = os.path.join(prf_dir, roi)
check_path(roi_dir)
# model seletion and tuning
ALPHA_NUM = 20
BOOTS_NUM = 15
paras_file = os.path.join(roi_dir, 'reg_paras.npy')
paras = np.memmap(paras_file, dtype='float64', mode='w+',
shape=(42500, len(vxl_idx), 72))
mcorr_file= os.path.join(roi_dir, 'reg_model_corr.npy')
mcorr = np.memmap(mcorr_file, dtype='float64', mode='w+',
shape=(42500, len(vxl_idx)))
alphas_file = os.path.join(roi_dir, 'reg_alphas.npy')
alphas = np.memmap(alphas_file, dtype='float64', mode='w+',
shape=(42500, len(vxl_idx)))
# fMRI data z-score
print 'fmri data temporal z-score'
m = np.mean(train_fmri_ts, axis=1, keepdims=True)
s = np.std(train_fmri_ts, axis=1, keepdims=True)
train_fmri_ts = (train_fmri_ts - m) / (1e-10 + s)
# split training dataset into model tunning set and model selection set
tune_fmri_ts = train_fmri_ts[:, :int(1750*0.9)]
sel_fmri_ts = train_fmri_ts[:, int(1750*0.9):]
# model testing
for i in range(42500):
print 'Model %s'%(i)
        # skip models whose centers fall outside the 20-degree visual angle
xi = (i % 2500) / 50
yi = (i % 2500) % 50
x0 = np.arange(5, 500, 10)[xi]
y0 = np.arange(5, 500, 10)[yi]
d = np.sqrt(np.square(x0-250)+np.square(y0-250))
if d > 249:
print 'Model center outside the visual angle'
paras[i, ...] = np.NaN
mcorr[i] = np.NaN
alphas[i] = np.NaN
continue
train_x = np.array(train_models[i, ...]).astype(np.float64)
train_x = zscore(train_x.T).T
# split training dataset into model tunning and selection sets
tune_x = train_x[:int(1750*0.9), :]
sel_x = train_x[int(1750*0.9):, :]
wt, r, alpha, bscores, valinds = ridge.bootstrap_ridge(
tune_x, tune_fmri_ts.T, sel_x, sel_fmri_ts.T,
alphas=np.logspace(-2, 3, ALPHA_NUM),
nboots=BOOTS_NUM, chunklen=175, nchunks=1,
single_alpha=False, use_corr=False)
paras[i, ...] = wt.T
mcorr[i] = r
alphas[i] = alpha
# save output
paras = np.array(paras)
np.save(paras_file, paras)
mcorr = np.array(mcorr)
np.save(mcorr_file, mcorr)
alphas = np.array(alphas)
np.save(alphas_file, alphas)
def ridge_regression(prf_dir, db_dir, subj_id, roi):
"""pRF model fitting using ridge regression.
    90% of the training data is used for model tuning, and the remaining 10%
    is used for model selection.
"""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del val_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load candidate models
train_models = np.load(os.path.join(db_dir, 'train_candidate_model.npy'),
mmap_mode='r')
# output directory config
roi_dir = os.path.join(prf_dir, roi)
check_path(roi_dir)
# model seletion and tuning
ALPHA_NUM = 10
paras_file = os.path.join(roi_dir, 'reg_paras.npy')
paras = np.memmap(paras_file, dtype='float64', mode='w+',
shape=(len(vxl_idx), 73))
val_r2_file= os.path.join(roi_dir, 'reg_val_r2.npy')
val_r2 = np.memmap(val_r2_file, dtype='float64', mode='w+',
shape=(len(vxl_idx), 25000, ALPHA_NUM))
alphas_file = os.path.join(roi_dir, 'reg_alphas.npy')
alphas = np.memmap(alphas_file, dtype='float64', mode='w+',
shape=(len(vxl_idx)))
# fMRI data z-score
print 'fmri data temporal z-score'
m = np.mean(train_fmri_ts, axis=1, keepdims=True)
s = np.std(train_fmri_ts, axis=1, keepdims=True)
train_fmri_ts = (train_fmri_ts - m) / (1e-5 + s)
# split training dataset into model tunning set and model selection set
tune_fmri_ts = train_fmri_ts[:, :int(1750*0.9)]
sel_fmri_ts = train_fmri_ts[:, int(1750*0.9):]
# model fitting
for i in range(len(vxl_idx)):
print '-----------------'
print 'Voxel %s'%(i)
for j in range(25000):
#print 'Model %s'%(j)
            # skip models whose centers fall outside the 20-degree visual angle
xi = (j % 2500) / 50
yi = (j % 2500) % 50
x0 = np.arange(2, 250, 5)[xi]
y0 = np.arange(2, 250, 5)[yi]
d = np.sqrt(np.square(x0-125)+np.square(y0-125))
if d > 124:
#print 'Model center outside the visual angle'
paras[i, ...] = np.NaN
val_r2[i, j, :] = np.NaN
continue
train_x = np.array(train_models[j, ...]).astype(np.float64)
# split training dataset into model tunning and selection sets
tune_x = train_x[:int(1750*0.9), :]
sel_x = train_x[int(1750*0.9):, :]
for a in range(ALPHA_NUM):
alpha_list = np.logspace(-2, 3, ALPHA_NUM)
# model fitting
reg = linear_model.Ridge(alpha=alpha_list[a])
reg.fit(tune_x, tune_fmri_ts[i])
val_pred = reg.predict(sel_x)
ss_tol = np.var(sel_fmri_ts[i]) * 175
r2 = 1.0 - np.sum(np.square(sel_fmri_ts[i] - val_pred))/ss_tol
val_r2[i, j, a] = r2
# select best model
vxl_r2 = np.nan_to_num(val_r2[i, ...])
sel_mdl_i, sel_alpha_i = np.unravel_index(vxl_r2.argmax(), vxl_r2.shape)
train_x = np.array(train_models[sel_mdl_i, ...]).astype(np.float64)
# split training dataset into model tunning and selection sets
tune_x = train_x[:int(1750*0.9), :]
sel_x = train_x[int(1750*0.9):, :]
alpha_list = np.logspace(-2, 3, ALPHA_NUM)
# selected model fitting
reg = linear_model.Ridge(alpha=alpha_list[sel_alpha_i])
reg.fit(tune_x, tune_fmri_ts[i])
val_pred = reg.predict(sel_x)
ss_tol = np.var(sel_fmri_ts[i]) * 175
r2 = 1.0 - np.sum(np.square(sel_fmri_ts[i] - val_pred))/ss_tol
print 'r-square recal: %s'%(r2)
#print 'r-square cal: %s'%(vxl_r2.max())
paras[i, ...] = np.concatenate((np.array([reg.intercept_]), reg.coef_))
alphas[i] = alpha_list[sel_alpha_i]
# save output
paras = np.array(paras)
np.save(paras_file, paras)
val_r2 = np.array(val_r2)
np.save(val_r2_file, val_r2)
alphas = np.array(alphas)
np.save(alphas_file, alphas)
def ridge_regression_model_test(prf_dir, db_dir, subj_id, roi):
"""Test pRF model derived from ridge regression with test dataset."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load candidate models
val_models = np.load(os.path.join(db_dir, 'val_candidate_model.npy'),
mmap_mode='r')
# fMRI data z-score
print 'fmri data temporal z-score'
m = np.mean(val_fmri_ts, axis=1, keepdims=True)
s = np.std(val_fmri_ts, axis=1, keepdims=True)
val_fmri_ts = (val_fmri_ts - m) / (1e-5 + s)
# output directory config
roi_dir = os.path.join(prf_dir, roi)
check_path(roi_dir)
# load selected models and the corresponding parameters
val_r2_file = os.path.join(roi_dir, 'reg_val_r2.npy')
val_r2 = np.load(val_r2_file, mmap_mode='r')
paras_file = os.path.join(roi_dir, 'reg_paras.npy')
paras = np.load(paras_file)
alphas_file = os.path.join(roi_dir, 'reg_alphas.npy')
alphas = np.load(alphas_file)
# output var
test_r2 = np.zeros(len(vxl_idx))
prf_pos = np.zeros((len(vxl_idx), 3))
# parameter candidates
alpha_list = np.logspace(-2, 3, 10)
sigma = [2, 4, 8, 16, 32, 60, 70, 80, 90, 100]
for i in range(len(vxl_idx)):
print '----------------'
print 'Voxel %s'%(i)
vxl_r2 = np.nan_to_num(val_r2[i, ...])
sel_mdl_i, sel_alpha_i = np.unravel_index(vxl_r2.argmax(), vxl_r2.shape)
print 'Select model %s'%(sel_mdl_i)
print 'Select alpha value %s - %s'%(alpha_list[sel_alpha_i], alphas[i])
# get model position info
xi = (sel_mdl_i % 2500) / 50
yi = (sel_mdl_i % 2500) % 50
x0 = np.arange(2, 250, 5)[xi]
y0 = np.arange(2, 250, 5)[yi]
s = sigma[sel_mdl_i / 2500]
prf_pos[i] = np.array([y0, x0, s])
# compute r^2 using test dataset
test_x = np.array(val_models[sel_mdl_i, ...]).astype(np.float64)
test_x = np.concatenate((np.ones((120, 1)), test_x), axis=1)
val_pred = test_x.dot(paras[i])
ss_tol = np.var(val_fmri_ts[i]) * 120
r2 = 1.0 - np.sum(np.square(val_fmri_ts[i] - val_pred))/ss_tol
print 'r-square on test dataset: %s'%(r2)
test_r2[i] = r2
# save output
np.save(os.path.join(roi_dir, 'reg_prf_test_r2.npy'), test_r2)
np.save(os.path.join(roi_dir, 'sel_reg_prf_pos.npy'), prf_pos)
def prf_selection(feat_dir, prf_dir, db_dir, subj_id, roi):
"""Select best model for each voxel and validating."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load candidate models
val_models = np.load(os.path.join(feat_dir, 'val_candidate_model.npy'),
mmap_mode='r')
# output directory config
roi_dir = os.path.join(prf_dir, roi)
# load candidate model parameters
paras = np.load(os.path.join(roi_dir, 'reg_paras.npy'))
mcorr = np.load(os.path.join(roi_dir, 'reg_model_corr.npy'))
alphas = np.load(os.path.join(roi_dir, 'reg_alphas.npy'))
sel_paras = np.zeros((mcorr.shape[1], 72))
sel_model = np.zeros(mcorr.shape[1])
sel_model_corr = np.zeros(mcorr.shape[1])
for i in range(mcorr.shape[1]):
maxi = np.argmax(np.nan_to_num(mcorr[:, i]))
print 'Voxel %s - Max corr %s - Model %s'%(i, mcorr[maxi, i], maxi)
print 'Alpha : %s'%(alphas[maxi, i])
sel_paras[i] = paras[maxi, i]
sel_model[i] = maxi
feats = np.array(val_models[maxi, ...]).astype(np.float64)
feats = zscore(feats.T).T
pred = np.dot(feats, sel_paras[i])
sel_model_corr[i] = np.corrcoef(pred, val_fmri_ts[i])[0, 1]
print 'Val Corr : %s'%(sel_model_corr[i])
np.save(os.path.join(roi_dir, 'reg_sel_paras.npy'), sel_paras)
np.save(os.path.join(roi_dir, 'reg_sel_model.npy'), sel_model)
np.save(os.path.join(roi_dir, 'reg_sel_model_corr.npy'), sel_model_corr)
def null_distribution_prf_tunning(feat_dir, prf_dir, db_dir, subj_id, roi):
"""Generate Null distribution of pRF model tunning using validation data."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load candidate models
val_models = np.load(os.path.join(feat_dir, 'val_candidate_model.npy'),
mmap_mode='r')
# output directory config
roi_dir = os.path.join(prf_dir, roi)
# load selected model parameters
paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
sel_model = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
null_corr = np.zeros((paras.shape[0], 1000))
for i in range(paras.shape[0]):
print 'Voxel %s'%(i)
# load features
feats = np.array(val_models[int(sel_model[i]), ...]).astype(np.float64)
feats = zscore(feats.T).T
pred = np.dot(feats, paras[i])
for j in range(1000):
shuffled_val_ts = np.random.permutation(val_fmri_ts[i])
null_corr[i, j] = np.corrcoef(pred, shuffled_val_ts)[0, 1]
np.save(os.path.join(roi_dir, 'random_corr.npy'), null_corr)
def gabor_contribution2prf(feat_dir, prf_dir, db_dir, subj_id, roi):
"""Calculate tunning contribution of each gabor sub-banks."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load candidate models
val_models = np.load(os.path.join(feat_dir, 'val_candidate_model.npy'),
mmap_mode='r')
# load selected model parameters
roi_dir = os.path.join(prf_dir, roi)
paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
sel_model = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
gabor_corr = np.zeros((paras.shape[0], 9))
for i in range(paras.shape[0]):
print 'Voxel %s'%(i)
# load features
feats = np.array(val_models[int(sel_model[i]), ...]).astype(np.float64)
feats = zscore(feats.T).T
for j in range(9):
pred = np.dot(feats[:, (j*8):(j*8+8)], paras[i, (j*8):(j*8+8)])
gabor_corr[i, j] = np.corrcoef(pred, val_fmri_ts[i])[0, 1]
np.save(os.path.join(roi_dir, 'gabor_contributes.npy'), gabor_corr)
def orient_selectivity(prf_dir, roi):
"""Calculate orientation selectivity index for each voxel."""
# load selected model parameters
roi_dir = os.path.join(prf_dir, roi)
paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
osi = np.zeros(paras.shape[0])
for i in range(paras.shape[0]):
print 'Voxel %s'%(i)
# load features
sel_paras = paras[i].reshape(9, 8)
orient = sel_paras.sum(axis=0)
osi[i] = orient.max() - (orient.sum() - orient.max())/7
np.save(os.path.join(roi_dir, 'orient_selectivity.npy'), osi)
def prf_recon(prf_dir, db_dir, subj_id, roi):
"""Reconstruct pRF based on selected model."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
del val_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# output directory config
roi_dir = os.path.join(prf_dir, roi)
# pRF estimate
sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
sel_paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
sel_model_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
prfs = np.zeros((sel_models.shape[0], 500, 500))
fig_dir = os.path.join(roi_dir, 'figs')
check_path(fig_dir)
for i in range(sel_models.shape[0]):
# get pRF
print 'Voxel %s, Val Corr %s'%(i, sel_model_corr[i])
model_idx = int(sel_models[i])
# get gaussian pooling field parameters
si = model_idx / 2500
xi = (model_idx % 2500) / 50
yi = (model_idx % 2500) % 50
x0 = np.arange(5, 500, 10)[xi]
y0 = np.arange(5, 500, 10)[yi]
sigma = [1] + [n*5 for n in range(1, 13)] + [70, 80, 90, 100]
s = sigma[si]
kernel = make_2d_gaussian(500, s, center=(x0, y0))
kpos = np.nonzero(kernel)
paras = sel_paras[i]
for f in range(9):
fwt = np.sum(paras[(f*8):(f*8+8)])
fs = np.sqrt(2)**f*4
for p in range(kpos[0].shape[0]):
tmp = make_2d_gaussian(500, fs, center=(kpos[1][p],
kpos[0][p]))
prfs[i] += fwt * kernel[kpos[0][p], kpos[1][p]] * tmp
if sel_model_corr[i]>=0.24:
prf_file = os.path.join(fig_dir,'Voxel_%s_%s.png'%(i+1, vxl_idx[i]))
vutil.save_imshow(prfs[i], prf_file)
np.save(os.path.join(roi_dir, 'prfs.npy'), prfs)
def filter_recon(prf_dir, db_dir, subj_id, roi):
"""Reconstruct filter map of each voxel based on selected model."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
del val_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# output config
roi_dir = os.path.join(prf_dir, roi)
# pRF estimate
sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
sel_paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
sel_model_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
filters = np.zeros((sel_models.shape[0], 500, 500))
fig_dir = os.path.join(roi_dir, 'filters')
check_path(fig_dir)
thr = 0.24
# gabor bank generation
gwt = bob.ip.gabor.Transform(number_of_scales=9)
gwt.generate_wavelets(500, 500)
spatial_gabors = np.zeros((72, 500, 500))
for i in range(72):
w = bob.ip.gabor.Wavelet(resolution=(500, 500),
frequency=gwt.wavelet_frequencies[i])
sw = bob.sp.ifft(w.wavelet.astype(np.complex128))
spatial_gabors[i, ...] = np.roll(np.roll(np.real(sw), 250, 0), 250, 1)
for i in range(sel_models.shape[0]):
if sel_model_corr[i]<thr:
continue
print 'Voxel %s, Val Corr %s'%(i, sel_model_corr[i])
model_idx = int(sel_models[i])
# get gaussian pooling field parameters
si = model_idx / 2500
xi = (model_idx % 2500) / 50
yi = (model_idx % 2500) % 50
x0 = np.arange(5, 500, 10)[xi]
y0 = np.arange(5, 500, 10)[yi]
sigma = [1] + [n*5 for n in range(1, 13)] + [70, 80, 90, 100]
s = sigma[si]
print 'center: %s, %s, sigma: %s'%(y0, x0, s)
kernel = make_2d_gaussian(500, s, center=(x0, y0))
kpos = np.nonzero(kernel>0.00000001)
paras = sel_paras[i]
tmp_file = os.path.join(fig_dir, 'tmp_kernel.npy')
tmp_filter = np.memmap(tmp_file, dtype='float64', mode='w+',
shape=(72, 500, 500))
Parallel(n_jobs=25)(delayed(filter_pro)(tmp_filter, paras, kernel,
kpos, spatial_gabors, gwt_idx)
for gwt_idx in range(72))
tmp_filter = np.array(tmp_filter)
filters[i] = tmp_filter.sum(axis=0)
os.system('rm %s'%(tmp_file))
im_file = os.path.join(fig_dir, 'Voxel_%s_%s.png'%(i+1, vxl_idx[i]))
vutil.save_imshow(filters[i], im_file)
np.save(os.path.join(roi_dir, 'filters.npy'), filters)
def filter_pro(tmp_filter, paras, kernel, kpos, spatial_gabors, gwt_idx):
data = np.zeros((500, 500))
wt = paras[gwt_idx]
arsw = spatial_gabors[gwt_idx]
for p in range(kpos[0].shape[0]):
tmp = img_offset(arsw, (kpos[0][p], kpos[1][p]))
data += wt * kernel[kpos[0][p], kpos[1][p]] * tmp
tmp_filter[gwt_idx] = data
def stimuli_recon(prf_dir, db_dir, subj_id, roi):
"""Reconstruct stimulus based on pRF model."""
# load fmri response
vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id,
roi=roi)
del train_fmri_ts
print 'Voxel number: %s'%(len(vxl_idx))
# load model parameters
roi_dir = os.path.join(prf_dir, roi)
val_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
filters = np.load(os.path.join(roi_dir, 'filters.npy'))
recon_imgs = np.zeros((val_fmri_ts.shape[1], 500, 500))
# fMRI data z-score
print 'fmri data temporal z-score'
m = np.mean(val_fmri_ts, axis=1, keepdims=True)
s = np.std(val_fmri_ts, axis=1, keepdims=True)
val_fmri_ts = (val_fmri_ts - m) / (1e-10 + s)
# select significant predicted voxels
sel_vxls = np.nonzero(val_corr>=0.24)[0]
for i in range(val_fmri_ts.shape[1]):
        print 'Reconstruct stimulus %s'%(i+1)
tmp = np.zeros((500, 500))
for j in sel_vxls:
tmp += val_fmri_ts[int(j), int(i)] * filters[j]
recon_imgs[i] = tmp
np.save(os.path.join(roi_dir, 'recon_img.npy'), recon_imgs)
def retinotopic_mapping(prf_dir, roi):
"""Get eccentricity and angle based on pRF center for each voxel."""
roi_dir = os.path.join(prf_dir, roi)
# load selected model index
sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
# output variables
ecc = np.zeros_like(sel_models)
angle = np.zeros_like(sel_models)
coords = np.zeros((sel_models.shape[0], 2))
for i in range(sel_models.shape[0]):
print 'Voxel %s'%(i+1)
model_idx = int(sel_models[i])
xi = (model_idx % 2500) / 50
yi = (model_idx % 2500) % 50
# col
x0 = np.arange(5, 500, 10)[xi]
# row
y0 = np.arange(5, 500, 10)[yi]
coords[i] = [y0, x0]
# get ecc and angle
ecc = retinotopy.coord2ecc(coords, 500, 20)
angle = retinotopy.coord2angle(coords, 500)
np.save(os.path.join(roi_dir, 'ecc.npy'), ecc)
np.save(os.path.join(roi_dir, 'angle.npy'), angle)
def img_offset(orig_img, new_center):
"""Move original image to new position based on new center coordinate.
    new_center is (row0, col0): the first element is the new row coordinate of
    the image center and the second is the new column coordinate.
"""
img_r, img_c = orig_img.shape
new_img = np.zeros_like(orig_img)
# move image position based on new center coordinates
old_x0 = img_r // 2
old_y0 = img_c // 2
offset0 = int(np.rint(new_center[0] - old_x0))
offset1 = int(np.rint(new_center[1] - old_y0))
pixs = np.mgrid[0:img_r, 0:img_c].reshape(2, img_r*img_c)
new_x = pixs[0] + offset0
new_y = pixs[1] + offset1
pix_idx = (new_x>=0) * (new_x<img_r) * (new_y>=0) * (new_y<img_c)
new_img[new_x[pix_idx], new_y[pix_idx]] = orig_img[pixs[0, pix_idx],
pixs[1, pix_idx]]
return new_img
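# Quick sanity sketch (illustrative only): shifting a 5x5 impulse image so the
# bright pixel moves from the old center (2, 2) to the new center (1, 3):
#   img = np.zeros((5, 5)); img[2, 2] = 1.0
#   img_offset(img, (1, 3))[1, 3]   # -> 1.0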
if __name__ == '__main__':
"""Main function."""
# config parser
cf = configParser.Config('config')
# database directory config
#db_dir = os.path.join(cf.get('database', 'path'), 'vim1')
# directory config for analysis
root_dir = cf.get('base', 'path')
feat_dir = os.path.join(root_dir, 'sfeatures', 'vim1')
res_dir = os.path.join(root_dir, 'subjects')
db_dir = os.path.join(root_dir, 'db', 'vim1')
# get gabor features
#get_stim_features(db_dir, feat_dir, 'train')
# get candidate models
#get_candidate_model(feat_dir, 'val')
#get_candidate_model_new(db_dir, 'train')
#-- general config
subj_id = 1
roi = 'v1'
# directory config
subj_dir = os.path.join(res_dir, 'vim1_S%s'%(subj_id))
prf_dir = os.path.join(subj_dir, 'regress_prf')
#-- pRF model fitting
    # pRF model tuning
#get_vxl_idx(prf_dir, db_dir, subj_id, roi)
#ridge_fitting(feat_dir, prf_dir, db_dir, subj_id, roi)
#prf_selection(feat_dir, prf_dir, db_dir, subj_id, roi)
#ridge_regression(prf_dir, db_dir, subj_id, roi)
ridge_regression_model_test(prf_dir, db_dir, subj_id, roi)
    # get null distribution of tuning performance
#null_distribution_prf_tunning(feat_dir, prf_dir, db_dir, subj_id, roi)
    # calculate tuning contribution of each gabor sub-bank
#gabor_contribution2prf(feat_dir, prf_dir, db_dir, subj_id, roi)
# pRF reconstruction
#prf_recon(prf_dir, db_dir, subj_id, roi)
# filter reconstruction
#filter_recon(prf_dir, db_dir, subj_id, roi)
# validation stimuli reconstruction
#stimuli_recon(prf_dir, db_dir, subj_id, roi)
# retinotopic mapping
#retinotopic_mapping(prf_dir, roi)
| bsd-3-clause |
skdaccess/skdaccess | skdaccess/geo/grace/data_fetcher.py | 2 | 9674 | # The MIT License (MIT)
# Copyright (c) 2016 Massachusetts Institute of Technology
#
# Authors: Victor Pankratius, Justin Li, Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# """@package GRACE
# Provides classes for accessing GRACE data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from skdaccess.utilities.grace_util import readTellusData, getStartEndDate
# standard library imports
import re
from ftplib import FTP
import os
import glob
from collections import OrderedDict
from configparser import NoSectionError, NoOptionError
from glob import glob
from math import floor
# 3rd party package imports
import pandas as pd
import numpy as np
from tqdm import tqdm
class DataFetcher(DataFetcherStorage):
''' Data Fetcher for GRACE data '''
def __init__(self, ap_paramList, start_date = None, end_date = None):
'''
Construct a Grace Data Fetcher
@param ap_paramList[geo_point]: AutoList of geographic location tuples (lat,lon)
@param start_date: Beginning date
@param end_date: Ending date
'''
self.start_date = start_date
self.end_date = end_date
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
        Create a data wrapper of GRACE data for the specified geo points.
@return Grace Data Wrapper
'''
conf = DataFetcher.getConfig()
try:
data_location = conf.get('grace', 'data_location')
csr_filename = conf.get('grace', 'csr_filename')
jpl_filename = conf.get('grace', 'jpl_filename')
gfz_filename = conf.get('grace', 'gfz_filename')
scale_factor_filename = conf.get('grace', 'scale_factor_filename')
except (NoOptionError, NoSectionError) as exc:
print('No data information available, please run: skdaccess grace')
raise exc
geo_point_list = self.ap_paramList[0]()
csr_data, csr_meta, lat_bounds, lon_bounds = readTellusData(os.path.join(data_location, csr_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'CSR','time')
jpl_data, jpl_meta, = readTellusData(os.path.join(data_location, jpl_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'JPL','time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
gfz_data, gfz_meta, = readTellusData(os.path.join(data_location, gfz_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'GFZ','time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
scale_factor_data, scale_factor_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude', 'SCALE_FACTOR',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
leakage_error_data, leakage_error_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude', 'LEAKAGE_ERROR',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
measurement_error_data, measurement_error_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude',
'MEASUREMENT_ERROR', lat_bounds=lat_bounds,
lon_bounds=lon_bounds)[:2]
# Get appropriate time range
start_date = self.start_date
end_date = self.end_date
def getMaskedValue(in_value):
'''
Retrieve the value if not masked,
otherwise return np.nan
@param in_value: Input value to check
@return input value or nan
'''
if np.ma.is_masked(in_value):
return np.nan
else:
return in_value
if start_date == None or end_date == None:
csr_start_date, csr_end_date = getStartEndDate(csr_data)
jpl_start_date, jpl_end_date = getStartEndDate(jpl_data)
gfz_start_date, gfz_end_date = getStartEndDate(gfz_data)
if start_date == None:
start_date = np.min([csr_start_date, jpl_start_date, gfz_start_date])
if end_date == None:
end_date = np.max([csr_end_date, jpl_end_date, gfz_end_date])
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for (csr_label, csr_frame), (jpl_label, jpl_frame), (gfz_label, gfz_frame) in zip(csr_data.items(),
jpl_data.items(),
gfz_data.items()):
data = pd.concat([csr_frame.loc[start_date:end_date],
jpl_frame.loc[start_date:end_date],
gfz_frame.loc[start_date:end_date]], axis=1)
data.index.name = 'Date'
label = csr_label
metadata_dict[label] = pd.Series({'scale_factor' : getMaskedValue(scale_factor_data[csr_label]),
'measurement_error' : getMaskedValue(measurement_error_data[csr_label]),
'leakage_error' : getMaskedValue(leakage_error_data[csr_label])})
data_dict[label] = data
metadata_frame = pd.DataFrame.from_dict(metadata_dict)
return(TableWrapper(data_dict,meta_data = metadata_frame,default_columns=['CSR','JPL','GFZ']))
def __str__(self):
'''
String representation of data fetcher
@return String listing the name and geopoint of data fetcher
'''
return 'Grace Data Fetcher' + super(DataFetcher, self).__str__()
@classmethod
def downloadFullDataset(cls, out_file = 'grace.h5', use_file = None):
'''
Download and parse data from the Gravity Recovery and Climate Experiment.
@param out_file: Output filename for parsed data
@param use_file: Directory of already downloaded data. If None, data will be downloaded.
@return Absolute path of parsed data
'''
        # Choose the config entry for a GRACE data file based on its filename
def setConfigFile(filename):
if re.search('SCALE_FACTOR', filename):
DataFetcher.setDataLocation('grace', filename, key='scale_factor_filename')
elif re.search('CSR', filename):
DataFetcher.setDataLocation('grace', filename, key='csr_filename')
elif re.search('GFZ', filename):
DataFetcher.setDataLocation('grace', filename, key='gfz_filename')
elif re.search('JPL', filename):
DataFetcher.setDataLocation('grace', filename, key='jpl_filename')
else:
return False
return True
if use_file is None:
print("Downloading GRACE Land Mass Data")
ftp = FTP("podaac-ftp.jpl.nasa.gov")
ftp.login()
ftp.cwd('/allData/tellus/L3/land_mass/RL05/netcdf')
dir_list = list(ftp.nlst(''))
file_list = [file for file in dir_list if re.search('.nc$', file)]
for filename in tqdm(file_list):
status = setConfigFile(filename)
if status == False:
print("Uknown file:", filename)
continue
ftp.retrbinary('RETR ' + filename, open(filename, 'wb').write)
ftp.quit()
DataFetcher.setDataLocation('grace', os.path.abspath('./'))
else:
files = glob(os.path.join(use_file, '*.nc'))
for filename in files:
status = setConfigFile(filename)
if status == False:
print('Unknown file')
DataFetcher.setDataLocation('grace', os.path.abspath(use_file))
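# Minimal usage sketch (illustrative only; assumes the GRACE netCDF files were
# already fetched via DataFetcher.downloadFullDataset, and that AutoList lives
# in skdaccess.framework.param_class, as used elsewhere in skdaccess):
#   from skdaccess.framework.param_class import AutoList
#   geo_points = AutoList([(36.7, -117.5)])   # hypothetical (lat, lon) tuple
#   fetcher = DataFetcher([geo_points], start_date='2005-01-01',
#                         end_date='2010-12-31')
#   wrapper = fetcher.output()                # TableWrapper with CSR/JPL/GFZ columns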
| mit |
UNR-AERIAL/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with rank < n_features) in the same way
    # as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
    alphas_mins = [10, 0.9, 1e-4]
    for alpha_min in alphas_mins:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
    alphas_mins = [10, 0.9, 1e-4]
    # same test, with normalization
    for alpha_min in alphas_mins:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
    # Note: it used to be the case that Lars had to use the "drop for good"
    # strategy for this, but this is no longer the case with the
    # equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
    # property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the one above with the additional positive
    # option. However, for the middle part (the comparison of coefficient values
    # for a range of alphas) we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
wohlert/agnosia | atone/preprocessing.py | 2 | 5672 | """
preprocessing
Provides routines for preprocessing of data.
"""
import numpy as np
from scipy.signal import savgol_filter
def scale(input_matrix: np.array) -> np.array:
"""
Scale the unit of measure from femtotesla to tesla.
"""
return input_matrix * 1e12
def normalise(input_matrix: np.array, axis=0) -> np.array:
"""
Normalises the data for input in certain classifiers.
"""
from scipy.stats import zscore
return zscore(input_matrix, axis=axis)
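# Illustrative usage sketch (hypothetical (trials, channels, samples) shape,
# not taken from the package's own data):
# >>> data = np.random.randn(580, 306, 375)
# >>> z = normalise(data, axis=0)   # z-score each channel/sample across trials
# >>> z.shape
# (580, 306, 375)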
def min_max(x) -> np.array:
"""
Uses minmax normalisation to scale input to the interval of 0-1.
"""
return np.abs((np.min(x) - x) / (np.max(x) - np.min(x)))
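# Illustrative sketch: min_max rescales any array onto the [0, 1] interval.
# >>> min_max(np.array([2., 4., 6.]))    # -> array([0. , 0.5, 1. ])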
def smooth(input_matrix: np.array, window: int=17, order: int=2) -> np.array:
"""
Apply Savitzky-Golay filtering to smooth the signal.
"""
assert window % 2 == 1, "Window size must be odd"
return savgol_filter(input_matrix, window, order)
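# Illustrative sketch (hypothetical shape): the Savitzky-Golay window must be
# odd and larger than the polynomial order.
# >>> signal = np.random.randn(580, 306, 375)
# >>> smoothed = smooth(signal, window=17, order=2)
# >>> smooth(signal, window=16)   # raises AssertionError ("Window size must be odd")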
def remove_baseline(input_matrix: np.array, start: int) -> np.array:
"""
    Removes the baseline (mean of the first `start` samples) from each signal.
"""
start = int(start)
baseline = np.mean(input_matrix[:, :, :start], axis=-1)
return input_matrix - baseline[:, :, None]
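# Illustrative sketch (hypothetical pre-stimulus length): subtract the mean of
# the first `start` samples from every trial and channel.
# >>> data = np.random.randn(580, 306, 375)
# >>> corrected = remove_baseline(data, start=125)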
def dropout_channels_monte_carlo(input_matrix: np.array, output_labels: np.array) -> np.array:
"""
    Perform shuffle-split cross-validation (5 random 80/20 splits)
    for each channel separately and return the per-channel
    classification accuracy with respect to the output labels.
"""
from sklearn.svm import SVC
clf = SVC(C=1, kernel='linear')
trials, channels, samples = np.shape(input_matrix)
def monte_carlo_channel(channel):
from sklearn.cross_validation import ShuffleSplit, cross_val_score
from .features import pool
cross_validation = ShuffleSplit(trials, n_iter=5, test_size=0.2)
input_pooled = pool(input_matrix[:, [channel]])
scores = cross_val_score(clf, input_pooled, output_labels, cv=cross_validation)
return np.mean(scores)
channel_list = np.arange(channels)
accuracies = np.array([monte_carlo_channel(c) for c in channel_list])
return accuracies
def dropout_channels_tanh(input_matrix: np.array) -> np.array:
"""
    Flags channels based on the hyperbolic tangent of the signal and the
    cross-trial standard deviation of these tangents (mask is True above 0.4).
    Note: the input must be normalised first.
"""
tangents = np.tanh(input_matrix)
cross_sample = np.std(tangents, axis=0)
cross_trial = np.mean(cross_sample, axis=1)
return cross_trial > 0.4
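# Illustrative sketch (hypothetical shape): normalise first; the returned
# boolean mask marks channels whose cross-trial tanh deviation exceeds 0.4.
# >>> z = normalise(np.random.randn(580, 306, 375), axis=0)
# >>> mask = dropout_channels_tanh(z)   # boolean array of shape (channels,)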
def dropout_channels_norm(input_matrix: np.array, threshold: float=0.05) -> np.array:
"""
    Computes a per-channel worst-case signal-to-noise ratio (SNR) and returns
    the indices of the channels whose SNR is not an extreme outlier relative
    to the other channels (one-sided test at p = `threshold`).
"""
from scipy.special import ndtr
trials, channels, _ = np.shape(input_matrix)
snr_channels = {}
for trial in range(trials):
for channel in range(channels):
samples = input_matrix[trial, channel]
mu = np.mean(samples)
sigma = np.std(samples)
# Signal to noise ratio
snr = mu/sigma
if channel not in snr_channels or snr_channels[channel] > snr:
snr_channels[channel] = snr
ratios = list(snr_channels.values())
point_estimate = np.mean(ratios)
standard_dev = np.std(ratios)
def approved(x) -> bool:
"""
        Function to measure whether the value is above p = threshold
"""
zscore = (point_estimate - x)/standard_dev
if ndtr(zscore) >= threshold:
return True
return False
valid_channels = [k for k, v in snr_channels.items() if approved(v)]
return np.array(valid_channels)
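# Illustrative sketch (hypothetical shape): keep only the channels whose
# worst-case SNR is not flagged as an outlier by the one-sided test.
# >>> data = np.random.randn(580, 306, 375)
# >>> good = dropout_channels_norm(data, threshold=0.05)   # channel indices
# >>> reduced = data[:, good, :]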
def get_magnetometers(file: str) -> np.array:
"""
Returns the input matrix with only the data
from the magnetometers.
Expected no. of magnetometers: 102
"""
meters = np.load(file)
find_magnetometers = np.vectorize(lambda x: bool(x.endswith("1")))
(magnetometers,) = np.where(find_magnetometers(meters))
return magnetometers
def get_gradiometers(file: str) -> np.array:
"""
Returns the input matrix with only the data
from the gradiometers.
Expected no. of gradiometers: 204
"""
meters = np.load(file)
find_gradiometers = np.vectorize(lambda x: bool(not x.endswith("1")))
(gradiometers,) = np.where(find_gradiometers(meters))
return gradiometers
def keep_channels(input_matrix: np.array, type: str) -> np.array:
"""
Remove channels from matrix that are
not contained in channels.
"""
    if type == "gradiometers":
        channels = get_gradiometers("channel_names.npy")
    elif type == "magnetometers":
        channels = get_magnetometers("channel_names.npy")
    else:
        raise ValueError("type must be 'gradiometers' or 'magnetometers'")
return input_matrix[:, channels, :]
def cut(input_matrix: np.array, start: int, end: int=None) -> np.array:
"""
Removes samples before a given point,
such as before stimuli.
Can also trim from both sides.
"""
_, _, samples = np.shape(input_matrix)
start = int(start)
    if end is not None:
end = int(end)
assert start < samples
return input_matrix[:, :, start:end].copy()
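# Illustrative sketch (hypothetical sample counts): drop pre-stimulus samples,
# or trim from both ends.
# >>> data = np.random.randn(580, 306, 375)
# >>> post_stimulus = cut(data, start=125)       # samples 125..end
# >>> window = cut(data, start=125, end=300)     # samples 125..299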
def cut_m170(input_matrix: np.array, start: float, sfreq: int, window_size: float=5.0) -> np.array:
"""
Cuts the samples around M170.
window_size is the number of ms before and after n170.
"""
window = window_size*0.01
impulse = abs(start)
prime = impulse + 0.170
nmin = prime - window
nmax = prime + window
area = range(int(nmin*sfreq), int(nmax*sfreq))
return input_matrix[:, :, area].copy()
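# Illustrative sketch (hypothetical recording starting 0.5 s before the
# stimulus, sampled at 250 Hz): keep only the samples around the M170 response.
# >>> data = np.random.randn(580, 306, 375)
# >>> m170 = cut_m170(data, start=-0.5, sfreq=250)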
| apache-2.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps28/src/evaluation.py | 56 | 43426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import sys
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
return numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1) + self.eps)
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth,
labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
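# Illustrative usage sketch with hypothetical labels (see the Examples section
# in the class docstring for the full DCASE evaluation loop):
# >>> metric = DCASE2016_SceneClassification_Metrics(class_list=['home', 'office'])
# >>> metric.evaluate(annotated_ground_truth=['home', 'office', 'office'],
# ...                 system_output=['home', 'office', 'home'])
# >>> metric.results()['overall_accuracy']   # ~0.75 (1.0 for 'home', 0.5 for 'office')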
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max : float > 0
Maximum event offset
"""
max = 0
for event in data:
if event['event_offset'] > max:
max = event['event_offset']
return max
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
        Event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
Event roll
"""
# Initialize
data_length = self.max_event_offset(data)
event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))
# Fill-in event_roll
for event in data:
pos = self.class_list.index(event['event_label'].rstrip())
onset = math.floor(event['event_onset'] * 1 / time_resolution)
offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
event_roll[onset:offset, pos] = 1
return event_roll
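# Illustrative sketch with a hypothetical two-class event list: each row of the
# roll is one time_resolution segment, each column one class.
# >>> metrics = EventDetectionMetrics(class_list=['speech', 'music'])
# >>> events = [{'event_label': 'speech', 'event_onset': 0.0, 'event_offset': 0.5},
# ...           {'event_label': 'music', 'event_onset': 0.3, 'event_offset': 1.0}]
# >>> metrics.list_to_roll(events, time_resolution=0.1).shape
# (11, 2)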
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
        sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]
        ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i][
'event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
Defines how close event onsets have to be in order to be considered match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
Defines how close event onsets have to be in order to be considered match. In seconds.
(Default value = 0.2)
percentage_of_length : float [0-1]
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar,
percentage_of_length * annotated_length)
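    # Illustrative sketch with hypothetical events: the pair below matches on the
    # onset (within t_collar) but not on the offset.
    # (`metric` is assumed to be an instance of this class.)
    # >>> ref = {'event_label': 'speech', 'event_onset': 1.00, 'event_offset': 2.00}
    # >>> sys = {'event_label': 'speech', 'event_onset': 1.15, 'event_offset': 3.00}
    # >>> metric.onset_condition(ref, sys, t_collar=0.2)    # True:  |1.00 - 1.15| <= 0.2
    # >>> metric.offset_condition(ref, sys, t_collar=0.2)   # False: |2.00 - 3.00| > max(0.2, 0.5 * 1.0)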
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall[
'Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
Nsubs = min(Nfp, Nfn)
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit |
NunoEdgarGub1/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.