repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
maheshakya/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
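A minimal sketch of how a numpy.distutils build would consume a configuration() like the one above, assuming a hypothetical parent-level setup.py (the real scikit-learn top-level build script is not shown in this row): add_subpackage() makes numpy.distutils import sklearn/metrics/setup.py and merge its Configuration, including the pairwise_fast extension.

# Hypothetical parent-level setup.py (illustrative sketch only)
from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('sklearn', parent_package, top_path)
    # Pulls in sklearn/metrics/setup.py and calls its configuration() function.
    config.add_subpackage('metrics')
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())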
Neurita/bamboo | bamboo/features.py | 1 | 2014 | """Features: Utilities related to pandas DataFrame features.
"""
# Author: Borja Ayerdi <[email protected]>
# License: BSD 3 clause
# Copyright: UPV/EHU
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.cross_validation import LeaveOneOut
from sklearn.ensemble import ExtraTreesClassifier
def get_feature_importance(X,Y):
"""
    Given a data matrix (X) and labels (Y), compute feature importances
    using Leave-One-Out cross-validation and an ExtraTrees classifier.
Parameters
----------
X: pd.DataFrame
DataFrame with data (n_samples, n_features)
Y: pd.DataFrame
DataFrame with labels (n_samples, 1)
Returns
-------
pd.DataFrame
DataFrame with feature importance values.
"""
# Leave One Out
K = len(Y)
vAcc = []
loo = LeaveOneOut(n=K)
yy = np.zeros(len(Y))
feat_imp = np.zeros((1,X.shape[1]))
for train, test in loo:
x_train, x_test, y_train, y_test = X[train,:], X[test,:], Y[train], Y[test]
# We correct NaN values in x_train and x_test
nan_mean = stats.nanmean(x_train)
nan_train = np.isnan(x_train)
nan_test = np.isnan(x_test)
x_test[nan_test] = 0
x_test = x_test + nan_test*nan_mean
x_train[nan_train] = 0
x_train = x_train + nan_train*nan_mean
# Compute mean, std and noise for z-score
std = np.std(x_train,axis=0)
med = np.mean(x_train,axis=0)
noise = [np.random.uniform(-0.000005, 0.000005) for p in range(0,x_train.shape[1])]
# Apply Z-score
x_train = (x_train-med)/(std+noise)
x_test = (x_test-med)/(std+noise)
# Classifier type.
classifier = ExtraTreesClassifier()
classifier = classifier.fit(x_train, y_train)
feat_imp_np = np.array(classifier.feature_importances_)
feat_imp = feat_imp + feat_imp_np
res = np.around(feat_imp/x_train.shape[0], decimals=4)
return pd.DataFrame(res)
| bsd-3-clause |
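A minimal usage sketch for get_feature_importance above, assuming the older scipy/scikit-learn APIs the module imports (sklearn.cross_validation.LeaveOneOut, scipy.stats.nanmean) are available, and that X and Y are passed as NumPy arrays since the function indexes them positionally (DataFrame users would pass .values); the synthetic data is illustrative only.

import numpy as np
from bamboo.features import get_feature_importance

rng = np.random.RandomState(0)
X = rng.rand(20, 5)                    # 20 samples, 5 features
Y = (X[:, 0] > 0.5).astype(int)        # labels driven mostly by feature 0

# Returns a 1 x n_features DataFrame of ExtraTrees importances averaged
# over the Leave-One-Out folds.
importances = get_feature_importance(X, Y)
print(importances)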
teonlamont/mne-python | mne/tests/test_dipole.py | 2 | 17400 | import os
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_equal
import pytest
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events)
from mne.dipole import get_phantom_dipoles
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import run_tests_if_main, _TempDir, requires_mne, run_subprocess
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.io.constants import FIFF
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_path = testing.data_path(download=False)
meg_path = op.join(data_path, 'MEG', 'sample')
fname_dip_xfit = op.join(meg_path, 'sample_audvis-ave_xfit.dip')
fname_raw = op.join(meg_path, 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(meg_path, 'sample_audvis_trunc_set1.dip')
fname_evo = op.join(meg_path, 'sample_audvis_trunc-ave.fif')
fname_evo_full = op.join(meg_path, 'sample_audvis-ave.fif')
fname_cov = op.join(meg_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(meg_path, 'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(meg_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-2-src.fif')
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert_equal(orig.name, new.name)
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert_equal(len(dip), n_dipoles)
assert_equal(dip.pos.shape, (n_dipoles, 3))
assert_equal(dip.ori.shape, (n_dipoles, 3))
assert_equal(dip.gof.shape, (n_dipoles,))
assert_equal(dip.amplitude.shape, (n_dipoles,))
@testing.requires_testing_data
def test_io_dipoles():
"""Test IO for .dip files."""
tempdir = _TempDir()
dipole = read_dipole(fname_dip)
print(dipole) # test repr
out_fname = op.join(tempdir, 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@testing.requires_testing_data
def test_dipole_fitting_ctf():
"""Test dipole fitting with CTF data."""
raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True)
events = make_fixed_length_events(raw_ctf, 1)
evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average()
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.))
# XXX Eventually we should do some better checks about accuracy, but
# for now our CTF phantom fitting tutorials will have to do
# (otherwise we need to add that to the testing dataset, which is
# a bit too big)
fit_dipole(evoked, cov, sphere)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting():
"""Test dipole fitting."""
amp = 100e-9
tempdir = _TempDir()
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True,
use_cps=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
with pytest.warns(RuntimeWarning, match='projection'):
dip, residuals = fit_dipole(evoked, cov, sphere, fname_fwd)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
assert (data_rms > resi_rms * 0.95).all(), \
'%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
assert_equal(fwd['src'][0]['coord_frame'], FIFF.FIFFV_COORD_HEAD)
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
out = dip.crop(dip_c.times[0], dip_c.times[-1])
assert (dip is out)
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did about as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
if os.getenv('TRAVIS', 'false').lower() == 'true' and \
'OPENBLAS_NUM_THREADS' in os.environ:
# XXX possibly some OpenBLAS numerical differences make
# things slightly worse for us
factor = 0.7
else:
factor = 0.8
assert dists[0] / factor >= dists[1], 'dists: %s' % dists
assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
'gc-dists (ori): %s' % gc_dists
assert amp_errs[0] / factor >= amp_errs[1],\
'amplitude errors: %s' % amp_errs
# This one is weird because our cov/sim/picking is weird
assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs
@testing.requires_testing_data
def test_dipole_fitting_fixed():
"""Test dipole fitting with a fixed position."""
import matplotlib.pyplot as plt
tpeak = 0.073
sphere = make_sphere_model(head_radius=0.1)
evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
evoked.pick_types(meg=True)
t_idx = np.argmin(np.abs(tpeak - evoked.times))
evoked_crop = evoked.copy().crop(tpeak, tpeak)
assert_equal(len(evoked_crop.times), 1)
cov = read_cov(fname_cov)
dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
assert (isinstance(dip_seq, Dipole))
assert_equal(len(dip_seq.times), 1)
pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
amp = dip_seq.amplitude[0]
# Fix position, allow orientation to change
dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
assert (isinstance(dip_free, Dipole))
assert_allclose(dip_free.times, evoked.times)
assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
dip_free.pos)
assert_allclose(ori, dip_free.ori[t_idx]) # should find same ori
    assert (np.dot(dip_free.ori, ori).mean() < 0.9)  # but few are the same
assert_allclose(gof, dip_free.gof[t_idx]) # ... same gof
assert_allclose(amp, dip_free.amplitude[t_idx]) # and same amp
assert_allclose(resid, resid_free[:, [t_idx]])
# Fix position and orientation
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
assert (isinstance(dip_fixed, DipoleFixed))
assert_allclose(dip_fixed.times, evoked.times)
assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
assert_allclose(dip_fixed.data[1, t_idx], gof)
assert_allclose(resid, resid_fixed[:, [t_idx]])
_check_roundtrip_fixed(dip_fixed)
# bad resetting
evoked.info['bads'] = [evoked.ch_names[3]]
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
# Degenerate conditions
evoked_nan = evoked.copy().crop(0, 0)
evoked_nan.data[0, 0] = None
pytest.raises(ValueError, fit_dipole, evoked_nan, cov, sphere)
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
ori=[2, 0, 0])
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
# copying
dip_fixed_2 = dip_fixed.copy()
dip_fixed_2.data[:] = 0.
assert not np.isclose(dip_fixed.data, 0., atol=1e-20).any()
# plotting
plt.close('all')
dip_fixed.plot()
plt.close('all')
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects."""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull."""
subject = 'sample'
raw = read_raw_fif(fname_raw, preload=True)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
    # Simulated scalp map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
bem = read_bem_solution(fname_bem)
dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
min_dist=min_dist)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
# Constraints are not exact, so bump the minimum slightly
assert (min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
pytest.raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth."""
trans = _get_trans(fname_trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
"""Test dipole fitting to sub-mm accuracy."""
evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
evoked.pick_types(meg=True, eeg=False)
evoked.pick_channels([c for c in evoked.ch_names[::4]])
for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
bem = make_sphere_model('auto', rad, evoked.info,
relative_radii=(0.999, 0.998, 0.997, 0.995))
src = read_source_spaces(fname_src)
fwd = make_forward_solution(evoked.info, None, src, bem)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=True)
vertices = [src[0]['vertno'], src[1]['vertno']]
n_vertices = sum(len(v) for v in vertices)
amp = 10e-9
data = np.eye(n_vertices + 1)[:n_vertices]
data[-1, -1] = 1.
data *= amp
stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
evoked.info.normalize_proj()
sim = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
cov = make_ad_hoc_cov(evoked.info)
dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
ds = []
for vi in range(n_vertices):
if vi < len(vertices[0]):
hi = 0
vertno = vi
else:
hi = 1
vertno = vi - len(vertices[0])
vertno = src[hi]['vertno'][vertno]
rr = src[hi]['rr'][vertno]
d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
ds.append(d)
# make sure that our median is sub-mm and the large majority are very
# close (we expect some to be off by a bit e.g. because they are
# radial)
assert ((np.percentile(ds, [50, 90]) < [0.0005, perc_90]).all())
@testing.requires_testing_data
def test_dipole_fixed():
"""Test reading a fixed-position dipole (from Xfit)."""
dip = read_dipole(fname_xfit_dip)
# print the representation of the object DipoleFixed
print(dip)
_check_roundtrip_fixed(dip)
with pytest.warns(RuntimeWarning, match='extra fields'):
dip_txt = read_dipole(fname_xfit_dip_txt)
assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
assert_allclose(dip_txt.amplitude[0], 12.1e-9)
with pytest.warns(RuntimeWarning, match='extra fields'):
dip_txt_seq = read_dipole(fname_xfit_seq_txt)
assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip):
"""Check roundtrip IO for fixed dipoles."""
tempdir = _TempDir()
dip.save(op.join(tempdir, 'test-dip.fif.gz'))
dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
assert_allclose(dip_read.data, dip_read.data)
assert_allclose(dip_read.times, dip.times)
assert_equal(dip_read.info['xplotter_layout'], dip.info['xplotter_layout'])
assert_equal(dip_read.ch_names, dip.ch_names)
for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
assert_equal(ch_1['ch_name'], ch_2['ch_name'])
for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
'cal', 'coil_type', 'scanno', 'logno'):
assert_allclose(ch_1[key], ch_2[key], err_msg=key)
def test_get_phantom_dipoles():
"""Test getting phantom dipole locations."""
pytest.raises(ValueError, get_phantom_dipoles, 0)
pytest.raises(ValueError, get_phantom_dipoles, 'foo')
for kind in ('vectorview', 'otaniemi'):
pos, ori = get_phantom_dipoles(kind)
assert_equal(pos.shape, (32, 3))
assert_equal(ori.shape, (32, 3))
@testing.requires_testing_data
def test_confidence():
"""Test confidence limits."""
tempdir = _TempDir()
evoked = read_evokeds(fname_evo_full, 'Left Auditory', baseline=(None, 0))
evoked.crop(0.08, 0.08).pick_types() # MEG-only
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.04), 0.08)
dip_py = fit_dipole(evoked, cov, sphere)[0]
fname_test = op.join(tempdir, 'temp-dip.txt')
dip_py.save(fname_test)
dip_read = read_dipole(fname_test)
with pytest.warns(RuntimeWarning, match="'noise/ft/cm', 'prob'"):
dip_xfit = read_dipole(fname_dip_xfit)
for dip_check in (dip_py, dip_read):
assert_allclose(dip_check.pos, dip_xfit.pos, atol=5e-4) # < 0.5 mm
assert_allclose(dip_check.gof, dip_xfit.gof, atol=5e-1) # < 0.5%
assert_array_equal(dip_check.nfree, dip_xfit.nfree) # exact match
assert_allclose(dip_check.khi2, dip_xfit.khi2, rtol=2e-2) # 2% miss
assert_equal(set(dip_check.conf.keys()), set(dip_xfit.conf.keys()))
for key in sorted(dip_check.conf.keys()):
assert_allclose(dip_check.conf[key], dip_xfit.conf[key],
rtol=1.5e-1, err_msg=key)
run_tests_if_main(False)
| bsd-3-clause |
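For orientation, a minimal sketch of the fit_dipole workflow that the tests above exercise, assuming illustrative file names and sphere parameters (placeholders, not the testing-dataset paths used in the tests):

import mne

# Placeholder file names -- substitute real evoked/covariance files.
evoked = mne.read_evokeds('sample-ave.fif', condition=0, baseline=(None, 0))
evoked.pick_types(meg=True)            # MEG-only, as in test_confidence
evoked.crop(0.08, 0.08)                # fit a single time point
cov = mne.read_cov('sample-cov.fif')
sphere = mne.make_sphere_model(r0=(0., 0., 0.04), head_radius=0.08)

# Returns the fitted Dipole and the residual field at the sensors.
dip, residual = mne.fit_dipole(evoked, cov, sphere)
print(dip.pos, dip.gof)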
leifdenby/numpy | numpy/lib/npyio.py | 35 | 71412 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
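# Note on the dispatch above: load() peeks at the first bytes of the stream --
# the zip signature b'PK\x03\x04' is treated as an .npz archive and wrapped in
# NpzFile, the .npy magic (format.MAGIC_PREFIX, b'\x93NUMPY') is read as a
# single array, and anything else falls back to pickle, which succeeds only
# when allow_pickle=True.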
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
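# Illustration of the mapping above, with assumed example tokens (the return
# values follow from the branches in _getconv):
#   _getconv(np.dtype(int))(b' 3.0')      -> 3       (int via float)
#   _getconv(np.dtype(float))(b'0x1.8p1') -> 3.0     (hex floats accepted)
#   _getconv(np.dtype('S5'))(b'abc')      -> b'abc'  (bytes passed through)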
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
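    # Worked illustration of the two helpers above, for an assumed
    #   dtype = np.dtype([('pos', float, 2), ('name', 'S3')]):
    # flatten_dtype(dtype) -> types   = [float64, float64, |S3]
    #                         packing = [(2, list), (1, None)]
    # and, after conversion, pack_items([1.0, 2.0, b'abc'], packing)
    #   -> ([1.0, 2.0], b'abc'),
    # i.e. the flat row of converted tokens is re-nested to match the dtype.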
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
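# Usage note (illustrative): converter keys always refer to the column index
# in the file, even when `usecols` is given; the usecols.index(i) remapping
# above makes, e.g.,
#   np.loadtxt(f, usecols=(0, 2),
#              converters={2: lambda s: float(s.strip() or 0)})
# apply the converter to file column 2, which becomes output column 1.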
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
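# Usage note (illustrative): for complex input with a single format, each
# column's format is expanded to ' (%s+%sj)' % (fmt, fmt), so for example
#   np.savetxt('c.txt', np.array([[1 + 2j, 3 - 4j]]), fmt='%.1e')
# writes a row roughly like " (1.0e+00+2.0e+00j)  (3.0e+00+-4.0e+00j)".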
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, list of str, generator
File, filename, list, or generator to read. If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with
      `names`), there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
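    An illustrative sketch of `missing_values` and `filling_values` (the
    data string and the fill value below are invented for this example)
    >>> s = StringIO("1,,N/A")
    >>> np.genfromtxt(s, delimiter=",", missing_values="N/A",
    ...               filling_values=-1)
    array([ 1., -1., -1.])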
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
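    Examples
    --------
    An illustrative sketch (the data below is invented for this example;
    the empty field comes back masked)
    >>> from io import StringIO
    >>> m = np.mafromtxt(StringIO("1,2,,4"), delimiter=",")
    >>> m.mask
    array([False, False,  True, False], dtype=bool)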
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
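    Examples
    --------
    A minimal illustrative sketch (the field names and values below are
    invented for this example)
    >>> r = np.recfromcsv(["name,value", "alpha,1", "beta,2"])
    >>> r.value
    array([1, 2])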
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-3-clause |
evgchz/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 15 | 33321 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
"""Regression test: max_features didn't work correctly in 0.14."""
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset all candidate models converge to 100% accuracy, so
    # the tie is broken by order and the first (unigram) configuration is
    # kept as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('l1', 'l2'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset all candidate models converge to 100% accuracy, so
    # the tie is broken by order and the first (unigram) configuration is
    # kept as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
cpicanco/player_plugins | self_contained/animation_test.py | 1 | 1645 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2016 Rafael Picanço.
The present file is distributed under the terms of the GNU General Public License (GPL v3.0).
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
clusters = np.load('/home/rafael/documents/doutorado/data_doc/003-Natan/2015-05-13/precision_report/data_ordered_by_trial.npy')
i = 0
all_gaze = []
for cluster in clusters:
by_trial = []
for gaze in cluster:
by_trial.append(gaze['norm_pos'])
by_trial = np.vstack(by_trial)
MX = np.mean(by_trial[:,0])
MY = np.mean(by_trial[:,1])
by_trial[:,0] = MX - by_trial[:,0]
by_trial[:,1] = MY - by_trial[:,1]
all_gaze.append(by_trial)
im = fig.add_axes([.08, .08, .85, (1/1.67539267016)], axisbg=(.2, .2, .2, .3))
im.spines['top'].set_visible(False)
im.spines['bottom'].set_visible(False)
im.spines['left'].set_visible(False)
im.spines['right'].set_visible(False)
im.xaxis.set_ticks_position('none')
def load_standard_frame():
plt.ylim(ymax=.2, ymin=-.2)
plt.xlim(xmax=.2, xmin=-.2)
def updatefig(*args):
global all_gaze,i
if i < len(all_gaze):
data = np.vstack(all_gaze[i])
X = data[:,0]
Y = data[:,1]
im.plot(X, Y, '.')
i+=1
else:
i=0
im.clear()
load_standard_frame()
return im,
print 1280./764.
ani = animation.FuncAnimation(fig, updatefig, interval=200, blit=True)
load_standard_frame()
plt.show() | gpl-3.0 |
SpectreJan/gnuradio | gr-filter/examples/fft_filter_ccc.py | 47 | 4363 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fft_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw0, bw1, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw0 = bw0
self._bw1 = bw1
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.complex_band_pass_2(1, self._fs,
self._bw0, self._bw1,
self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fft_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-S", "--start-pass", type="eng_float", default=1000,
help="Start of Passband [default=%default]")
parser.add_option("-E", "--end-pass", type="eng_float", default=2000,
help="End of Passband [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decimation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fft_filter_ccc(options.nsamples,
options.samplerate,
options.start_pass,
options.end_pass,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
AlexRobson/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
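# Note: with smooth_idf=True the transformer behaves as if one extra document
# containing every term had been seen, i.e. roughly idf(t) = ln((1 + n) / (1 + df(t))) + 1,
# which is why a feature column of all zeros cannot trigger a division by zero
# here, unlike the unsmoothed variant exercised in the next test.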
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
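# The bounds above follow from sublinear scaling: sublinear_tf replaces a raw
# count tf with 1 + ln(tf), so counts of 1, 2 and 3 map to 1.0, ~1.69 and ~2.10.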
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # every parameter combination reaches 100% accuracy on this toy dataset, so
    # the tie is broken by grid order and the simplest (unigram) configuration
    # is reported as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # every parameter combination reaches 100% accuracy on this toy dataset, so
    # the tie is broken by grid order and the simplest (unigram) configuration
    # is reported as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
basp-group/purify | scripts/padmm_plot_SNR_and_time.py | 1 | 8860 | import os
import multiprocessing
import time
import numpy as np
import pyfits
import glob
import subprocess
import matplotlib.pyplot as plt
image = "30dor_256"
n_tests = 10
input_SNR = 30
def kernel_settings(kernel):
J = 4
oversample = 2
if kernel == "pswf":
J = 6
if kernel == "kb_interp":
oversample = 1.375
J = 5
if kernel == "box":
J = 1
if kernel == "kb_min":
oversample = 1.375
J = 5
if kernel == "kb_min4":
oversample = 1.375
J = 4
kernel = "kb_min"
if kernel == "kb_interp4":
oversample = 1.375
J = 4
kernel = "kb_interp"
return J, oversample, kernel
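# Illustrative reading of the settings above: the "minimum oversampling"
# kernels trade a slightly wider interpolation support for a smaller FFT grid,
# e.g.
#   kernel_settings("pswf")       # -> (6, 2, "pswf")
#   kernel_settings("kb_interp")  # -> (5, 1.375, "kb_interp")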
def run_test_padmm_reweighted((i, kernel, M_N_ratio, start_time, input_SNR, image)):
time.sleep(start_time)
J, oversample, kernel = kernel_settings(kernel)
os.system("screen -S "+ kernel + "_" + str(i) + " -d -m " +
"../build/cpp/example/padmm_reweighted_simulation " + kernel + " "
+ str(oversample) + " " +str(J) + " " +str(M_N_ratio) + " " + str(i) + " "+str(input_SNR)+ " " + str(image))
results_file = "../build/outputs/"+image+"_results_" + kernel + "_" + str(i) + ".txt"
while not os.path.exists(results_file):
time.sleep(1)
results = np.loadtxt(results_file, dtype = str)
SNR = results[0]
total_time = results[1]
os.system("rm " + results_file)
return [float(SNR), float(total_time)]
def run_test_padmm((i, kernel, M_N_ratio, start_time, input_SNR, image)):
time.sleep(start_time)
J, oversample, kernel = kernel_settings(kernel)
os.system("screen -S "+ kernel + "_" + str(i) + " -d -m " +
"../build/cpp/example/padmm_simulation padmm " + kernel + " "
+ str(oversample) + " " +str(J) + " " +str(M_N_ratio) + " " + str(i) + " "+str(input_SNR)+ " " + str(image))
results_file = "../build/outputs/"+image+"_results_" + kernel + "_" + str(i) + ".txt"
while not os.path.exists(results_file):
time.sleep(1)
results = np.loadtxt(results_file, dtype = str)
SNR = results[0]
total_time = results[1]
converged = results[2]
niters = results[3]
os.system("rm " + results_file)
return [float(SNR), float(total_time), int(converged), float(niters)]
def run_test_ms_clean((i, kernel, M_N_ratio, start_time, input_SNR, image)):
time.sleep(start_time)
J, oversample, kernel = kernel_settings(kernel)
os.system("screen -S "+ kernel + "_" + str(i) + " -d -m " +
"../build/cpp/example/padmm_simulation ms_clean " + kernel + " "
+ str(oversample) + " " +str(J) + " " +str(M_N_ratio) + " " + str(i) + " "+str(input_SNR)+ " " + str(image))
results_file = "../build/outputs/"+image+"_results_" + kernel + "_" + str(i) + ".txt"
while not os.path.exists(results_file):
time.sleep(1)
results = np.loadtxt(results_file, dtype = str)
SNR = results[0]
total_time = results[1]
converged = results[2]
niters = results[3]
os.system("rm " + results_file)
return [float(SNR), float(total_time), int(converged), float(niters)]
def run_test_clean((i, kernel, M_N_ratio, start_time, input_SNR, image)):
time.sleep(start_time)
J, oversample, kernel = kernel_settings(kernel)
os.system("screen -S "+ kernel + "_" + str(i) + " -d -m " +
"../build/cpp/example/padmm_simulation clean " + kernel + " "
+ str(oversample) + " " +str(J) + " " +str(M_N_ratio) + " " + str(i) + " "+str(input_SNR)+ " " + str(image))
results_file = "../build/outputs/"+image+"_results_" + kernel + "_" + str(i) + ".txt"
while not os.path.exists(results_file):
time.sleep(1)
results = np.loadtxt(results_file, dtype = str)
SNR = results[0]
total_time = results[1]
converged = results[2]
niters = results[3]
os.system("rm " + results_file)
return [float(SNR), float(total_time), int(converged), float(niters)]
def collect_data(args, results, M_N_ratios, kernel):
meantempSNR = []
errortempSNR = []
meantempTime = []
errortempTime = []
meantempIters = []
errortempIters = []
totaltempConverges = []
for m in M_N_ratios:
tempSNR = []
tempTime = []
tempIters = []
tempConverges = []
for i in range(len(args)):
if m == args[i][2]:
if args[i][1] == kernel:
tempSNR.append(results[i][0])
tempTime.append(results[i][1])
tempConverges.append(results[i][2])
tempIters.append(results[i][3])
tempSNR = np.array(tempSNR)
tempTime = np.array(tempTime)
tempConverges = np.array(tempConverges)
tempIters = np.array(tempIters)
meantempSNR.append(tempSNR.mean())
errortempSNR.append(tempSNR.std())
meantempTime.append(tempTime.mean())
errortempTime.append(tempTime.std())
meantempIters.append(tempIters.mean())
errortempIters.append(tempIters.std())
totaltempConverges.append(tempConverges.sum())
return meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges
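# collect_data groups the per-run results by (kernel, M/N ratio) and returns,
# for each ratio, the mean/std of SNR, runtime and iteration count plus the
# number of runs that converged -- the same columns create_plots writes out
# with np.savetxt.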
def create_plots(args, results, M_N_ratios, name, kernels, colours,legend = []):
for k in range(len(kernels)):
meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges = collect_data(args, results, M_N_ratios, kernels[k])
plt.errorbar(M_N_ratios, meantempSNR, errortempSNR, fmt='', c = colours[k])
if len(legend) > 0:
plt.legend(legend)
plt.xlabel("M/N")
plt.ylabel("SNR, db")
plt.xlim(0, 2.2)
plt.ylim(5, 40)
plt.savefig(name + "_SNR_plot.pdf")
plt.clf()
for k in range(len(kernels)):
meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges = collect_data(args, results, M_N_ratios, kernels[k])
plt.errorbar(M_N_ratios, meantempTime, errortempTime, fmt='', c = colours[k])
plt.xlabel("M/N")
plt.ylabel("Time (seconds)")
plt.xlim(0, 2.2)
plt.savefig(name + "_Time_plot.pdf")
plt.clf()
for k in range(len(kernels)):
meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges = collect_data(args, results, M_N_ratios, kernels[k])
plt.errorbar(M_N_ratios, meantempIters, errortempIters, fmt='', c = colours[k])
plt.xlabel("M/N")
plt.ylabel("Iterations")
plt.xlim(0, 2.2)
plt.ylim(0, 110)
plt.savefig(name + "_Iterations_plot.pdf")
plt.clf()
for k in range(len(kernels)):
meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges = collect_data(args, results, M_N_ratios, kernels[k])
plt.scatter(M_N_ratios, totaltempConverges, c = colours[k])
plt.xlabel("M/N")
plt.ylabel("Number of converging tests")
plt.ylim(0, 11)
plt.xlim(0, 2.2)
plt.savefig(name + "_Converges_plot.pdf")
plt.clf()
for k in range(len(kernels)):
meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges = collect_data(args, results, M_N_ratios, kernels[k])
x = np.column_stack((meantempSNR, errortempSNR, meantempTime, errortempTime, meantempIters, errortempIters, totaltempConverges))
np.savetxt(name + '_' + kernels[k], x)
if __name__ == '__main__':
M_N_ratios = np.arange(1, 11) * 0.2
args = []
test_num = 0
#kernels = ["kb", "kb_interp", "pswf", "gauss", "box", "gauss_alt", "kb_min", "kb_min4"]#, "kb_interp4"]
kernels = ["kb", "pswf", "gauss", "gauss_alt", "box"]
total_tests = n_tests * len(kernels) * len(M_N_ratios)
for i in range(1, n_tests + 1):
for k in kernels:
for m in M_N_ratios:
test_num = test_num + 1
args.append((test_num, k, m, test_num * 1./ total_tests * 30., input_SNR, image))
print test_num
n_processors = multiprocessing.cpu_count() + 1
    p = multiprocessing.Pool(min(n_processors, 1))  # cap the pool size, otherwise too many processes can hit the user process limit
#legend = ["Kaiser Bessel (KB)", "KB (Linear-interp, Min-oversample)", "PSWF", "Gaussian (Optimal)", "Box", "Gaussian (non-Optimal)", "KB (Min-oversample)", "KB4 (Min-oversample)"]#, "KB4 (Linear-interp, Min-oversample)"]
#colours = ['blue', 'red', 'black', 'green', 'magenta', 'cyan', 'yellow', "#800000"]#, "#808000"]
legend = ["Kaiser Bessel (KB)", "PSWF", "Gaussian (Optimal)", "Gaussian (non-Optimal)", "Box"]
colours = ['blue', 'black', 'green', 'cyan', 'magenta']
results = p.map(run_test_padmm, args)
create_plots(args, results, M_N_ratios, image + "_padmm_" + str(input_SNR) + "_", kernels, colours, legend)
print "PADMM Done!"
results = p.map(run_test_ms_clean, args)
create_plots(args, results, M_N_ratios, image + "_ms_clean" + str(input_SNR) + "_", kernels, colours)
print "MS CLEAN Done!"
results = p.map(run_test_clean, args)
create_plots(args, results, M_N_ratios, image + "_clean" + str(input_SNR) + "_", kernels, colours)
print "CLEAN Done!"
#results = p.map(run_test_padmm_reweighted, args)
#create_plots(args, results, M_N_ratios, image + "_padmm_reweighted" + str(input_SNR) + "_", kernels, colours, legend)
#print "PADMM REWEIGHTED Done!"
| gpl-2.0 |
stuliveshere/PySeis | PySeis/processing/processing.py | 1 | 18245 | import numpy as np
import toolbox
import pylab
from scipy.signal import butter, lfilter, convolve2d
from scipy.interpolate import RectBivariateSpline as RBS
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
import matplotlib.patches as patches
import numpy.ma as ma
import sys
import warnings
warnings.filterwarnings("ignore")
class DraggablePoint:
lock = None #only one can be animated at a time
def __init__(self, point):
self.point = point
self.press = None
self.background = None
def connect(self):
'connect to all the events we need'
self.cidpress = self.point.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.cidrelease = self.point.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.cidmotion = self.point.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def on_press(self, event):
if event.button == 3:
if event.inaxes != self.point.axes: return
if DraggablePoint.lock is not None: return
contains, attrd = self.point.contains(event)
if not contains: return
self.press = (self.point.center), event.xdata, event.ydata
DraggablePoint.lock = self
# draw everything but the selected rectangle and store the pixel buffer
canvas = self.point.figure.canvas
axes = self.point.axes
self.point.set_animated(True)
canvas.draw()
self.background = canvas.copy_from_bbox(self.point.axes.bbox)
# now redraw just the rectangle
axes.draw_artist(self.point)
# and blit just the redrawn area
canvas.blit(axes.bbox)
def on_motion(self, event):
if DraggablePoint.lock is not self:
return
if event.inaxes != self.point.axes: return
self.point.center, xpress, ypress = self.press
dx = event.xdata - xpress
dy = event.ydata - ypress
self.point.center = (self.point.center[0]+dx, self.point.center[1]+dy)
canvas = self.point.figure.canvas
axes = self.point.axes
# restore the background region
canvas.restore_region(self.background)
# redraw just the current rectangle
axes.draw_artist(self.point)
# blit just the redrawn area
canvas.blit(axes.bbox)
def on_release(self, event):
'on release we reset the press data'
if DraggablePoint.lock is not self:
return
self.press = None
DraggablePoint.lock = None
# turn off the rect animation property and reset the background
self.point.set_animated(False)
self.background = None
# redraw the full figure
self.point.figure.canvas.draw()
def disconnect(self):
'disconnect all the stored connection ids'
self.point.figure.canvas.mpl_disconnect(self.cidpress)
self.point.figure.canvas.mpl_disconnect(self.cidrelease)
self.point.figure.canvas.mpl_disconnect(self.cidmotion)
def initialise(file, memmap=False, scan=False):
#intialise empty parameter dictionary
#kwargs stands for keyword arguments
kwargs = {}
#load file
if memmap == True:
ns = np.fromfile(file, dtype=toolbox.su_header_dtype, count=1)['ns']
sutype = toolbox.typeSU(ns)
dataset = np.memmap(file, dtype=sutype)
else:
dataset = toolbox.read(file)
#allocate stuff
#~
ns = kwargs['ns'] = dataset['ns'][0]
dt = kwargs['dt'] = dataset['dt'][0]/1e6
#also add the time vector - it's useful later
kwargs['times'] = np.arange(0, dt*ns, dt)
dataset['trace'] /= np.amax(dataset['trace'])
dataset['tracr'] = np.arange(dataset.size)
kwargs['primary'] = 'cdp'
kwargs['secondary'] = 'offset'
kwargs['cdp'] = np.sort(np.unique(dataset['cdp']))
kwargs['step'] = 1
if scan:
toolbox.scan(dataset)
return dataset, kwargs
def tar(data, **kwargs):
#pull some values out of the
#paramter dictionary
gamma = kwargs['gamma']
t = kwargs['times']
#calculate the correction coeffieicnt
r = np.exp(gamma * t)
#applyt the correction to the data
data['trace'] *= r
return data
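# A quick sense of the gain applied above (illustrative numbers): with
# gamma = 2.0 on a 1 s trace, the last sample is scaled by exp(2) ~ 7.4, i.e.
# later arrivals receive exponentially more gain than early ones.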
def apply_statics(data, **kwargs):
for trace in data:
shift = trace['tstat']/(kwargs['dt']*1000).astype(np.int)
if shift > 0:
trace['trace'][-shift:] = 0
if shift < 0:
trace['trace'][:-shift] = 0
trace['trace'] = np.roll(trace['trace'] , shift)
return data
def build_vels(vels, **kwargs):
from scipy import interpolate
cdps = np.array(kwargs['cdp'])
times = np.array(kwargs['times'])
keys = vels.keys()
x = []
t = []
values = []
for i in vels.items():
cdp = i[0]
picks= i[1]
for pick in picks:
x.append(cdp)
t.append(pick[1])
values.append(pick[0])
grid_x, grid_y = np.meshgrid(cdps, times)
#top left
x.append(min(cdps))
t.append(min(times))
values.append(min(values))
#top right
t.append(min(times))
x.append(max(cdps))
values.append(min(values))
#bottom left
x.append(min(cdps))
t.append(max(times))
values.append(max(values))
#bottom right
t.append(max(times))
x.append(max(cdps))
values.append(max(values))
zi = pylab.griddata(x, t, values, grid_x, grid_y, interp='linear')
return zi.T
def _nmo_calc(tx, vels, offset):
'''calculates the zero offset time'''
t0 = np.sqrt(tx*tx - (offset*offset)/(vels*vels))
return t0
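# _nmo_calc is the NMO hyperbola tx^2 = t0^2 + x^2 / v^2 solved for the
# zero-offset time. Illustrative numbers: tx = 1.0 s, offset = 1000 m and
# v = 2000 m/s give t0 = sqrt(1.0 - 0.25) ~ 0.87 s. Where tx < x/v the square
# root is imaginary and numpy returns NaN, which the callers below zero out
# with np.nan_to_num.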
def old_nmo(dataset, **kwargs):
if 'smute' not in kwargs.keys(): kwargs['smute'] = 10000.
ns = kwargs['ns']
dt = kwargs['dt']
tx = kwargs['times']
minCdp = np.amin(dataset['cdp'])
counter = 0
ntraces = dataset.size
print "moving out %d traces" %ntraces
result = dataset.copy()
result['trace'] *= 0
for i in range(dataset.size):
trace = dataset[i]
counter += 1
if counter > 1000:
ntraces -= counter
counter = 0
print ntraces
aoffset = np.abs(trace['offset'].astype(np.float))
cdp = trace['cdp']
vel = kwargs['vels'][cdp - minCdp]
#calculate time shift for each sample in trac
t0 = _nmo_calc(tx, vel, aoffset)
t0 = np.nan_to_num(t0)
#calculate stretch between each sample
stretch = 100.0*(np.pad(np.diff(t0),(0,1), 'reflect')-dt)/dt
mute = kwargs['smute']
filter = [(stretch >0.0) & ( stretch < mute)]
#interpolate
result[i]['trace'] = np.interp(tx, t0, trace['trace']) * filter
return result
def nmo(dataset, **kwargs):
dataset.sort(order='cdp')
cdps = np.unique(dataset['cdp'])
minCdp = cdps[0]
times = kwargs['times']
dt = kwargs['dt']
ns = kwargs['ns']
nt = dataset.shape[0]
traces = np.arange(nt)
cdp_columns = dataset['cdp'] - minCdp
vels = np.zeros_like(dataset['trace'])
for i in range(cdp_columns.size):
vels[i] = kwargs['vels'][cdp_columns[i]]
tx = np.ones(dataset['trace'].shape) * times
offset = dataset['offset'][:, None]
t0 = _nmo_calc(tx, vels, offset)
t0 = np.nan_to_num(t0)
shifts = np.ones(dataset['trace'].shape) * (ns * dt * traces[:, None])
tx += shifts
t0 += shifts
result = np.interp(tx.ravel(), t0.ravel(), dataset['trace'].flatten())
dataset['trace'] = result.reshape(nt, ns)
#calculate stretch between each sample
stretch = 100.0*(np.abs(t0 - np.roll(t0, 1, axis=-1))/dt)
stretch = np.nan_to_num(stretch)
mute = kwargs['smute'] * 1.0
filter = [(stretch >0.0) & ( stretch < mute)][0]
dataset['trace'] *= filter
return dataset
def axis_nmo(dataset, **kwargs):
pass
def _stack_gather(gather):
'''stacks a single gather into a trace.
uses header of first trace. normalises
by the number of nonzero samples'''
pilot = gather[np.argmin(gather['offset'])]
norm = gather['trace'].copy()
norm = np.nan_to_num(norm)
    norm[np.nonzero(norm)] = 1  # count live samples; norm**0 would also count zeros (0**0 == 1)
    norm = np.sum(norm, axis=-2)
    norm[norm == 0] = 1  # avoid 0/0 where a time sample is dead on every trace
pilot['trace'] = np.sum(gather['trace'], axis=-2)/norm
return pilot
def stack(dataset, **kwargs):
cdps = np.unique(dataset['cdp'])
sutype = np.result_type(dataset)
result = np.zeros(cdps.size, dtype=sutype)
for index, cdp in enumerate(cdps):
gather = dataset[dataset['cdp'] == cdp]
trace = _stack_gather(gather)
result[index] = trace
return result
def semb(workspace,**kwargs):
print ''
def onclick(e):
if e.button == 1:
print "(%.1f, %.3f)," %(e.xdata, e.ydata),
w = np.abs(np.diff(ax.get_xlim())[0])/50.
h = np.abs(np.diff(ax.get_ylim())[0])/50.
circ= patches.Ellipse((e.xdata, e.ydata), width=w, height=h, fc='k')
ax.add_patch(circ)
dr = DraggablePoint(circ)
dr.connect()
drs.append(dr)
fig.canvas.draw()
vels = kwargs['velocities']
nvels = vels.size
ns = kwargs['ns']
result = np.zeros((nvels,ns),'f')
loc = np.mean(workspace['cdp'])
for v in range(nvels):
panel = workspace.copy()
kwargs['vels'] = np.ones(kwargs['ns'], 'f') * vels[v]
        panel = nmo(panel, **kwargs)
norm = panel['trace'].copy()
norm[np.nonzero(norm)] = 1
n = np.sum(norm, axis=0)
a = np.sum(panel['trace'], axis=0)**2
b = n * np.sum(panel['trace']**2, axis=0)
window = kwargs['smoother']*1.0
kernel = np.ones(window)/window
a = np.convolve(a, kernel, mode='same')
b = np.convolve(b, kernel, mode='same')
result[v:] = np.sqrt(a/b)
pylab.imshow(result.T, aspect='auto', extent=(min(vels), max(vels),kwargs['ns']*kwargs['dt'],0.), cmap='jet')
pylab.xlabel('velocity')
pylab.ylabel('time')
pylab.title("cdp = %d" %np.unique(loc))
pylab.colorbar()
print "vels[%d]=" %loc,
fig = pylab.gcf()
ax = fig.gca()
fig.canvas.mpl_connect('button_press_event', onclick)
drs = []
pylab.show()
print ''
print "vels[%d]=" %loc,
for dr in drs:
print "(%.1f, %.3f)," %dr.point.center,
def _lmo_calc(aoffset, velocity):
t0 = -1.0*aoffset/velocity
return t0
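# Linear moveout shifts each trace earlier by offset/velocity seconds (the
# negative sign, combined with np.roll in lmo(), rolls samples towards time
# zero); e.g. a 1500 m offset with a 3000 m/s LMO velocity is advanced by
# 0.5 s. Note that the conversion to an integer sample shift below implicitly
# assumes a 1 ms sample interval.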
def lmo(dataset, **kwargs):
offsets = np.unique(dataset['offset'])
for offset in offsets:
aoffset = np.abs(offset)
shift = _lmo_calc(aoffset, kwargs['lmo'])
shift = (shift*1000).astype(np.int)
inds= [dataset['offset'] == offset]
dataset['trace'][inds] = np.roll(dataset['trace'][inds], shift, axis=-1) #results[inds]
return dataset
def trace_mix(dataset, **kwargs):
ns = kwargs['ns']
window = np.ones(kwargs['mix'], 'f')/kwargs['mix']
for i in range(ns):
dataset['trace'][:,i] = np.convolve(dataset['trace'][:,i], window, mode='same')
return dataset
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
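# Illustrative use of the helpers above: with a 4 ms sample interval
# (fs = 1/dt = 250 Hz),
#   filtered = butter_bandpass_filter(trace, lowcut=5., highcut=60., fs=250., order=3)
# keeps roughly the 5-60 Hz band. bandpass() below runs the filter over
# time-reversed and then re-reversed traces, approximating a zero-phase
# (filtfilt-style) response.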
def bandpass(dataset, **kwargs):
# Sample rate and desired cutoff frequencies (in Hz).
fs = 1./kwargs['dt']
lowcut = kwargs['lowcut']
highcut = kwargs['highcut']
dataset['trace'] = butter_bandpass_filter(np.fliplr(dataset['trace']), lowcut, highcut, fs, order=3)
dataset['trace'] = butter_bandpass_filter(np.fliplr(dataset['trace']), lowcut, highcut, fs, order=3)
return dataset
def fk_view(dataset, **kwargs):
mid= dataset.size/2
f = np.abs(np.fft.rfft2(dataset['trace']))
freq = np.fft.rfftfreq(kwargs['ns'], d=kwargs['dt'])
k = np.fft.rfftfreq(dataset.size, d=kwargs['dx'])
kmax = k[-1]
f[:mid] = f[:mid][::-1]
f[mid:] = f[mid:][::-1]
pylab.figure()
pylab.imshow(f.T, aspect='auto', extent=[-1*kmax, kmax, freq[-1], freq[0]])
pylab.colorbar()
def fk_design(dataset, **kwargs):
mid= dataset.size/2
f = np.abs(np.fft.rfft2(dataset['trace']))
freq = np.fft.rfftfreq(kwargs['ns'], d=kwargs['dt'])
k = np.fft.rfftfreq(dataset.size, d=kwargs['dx'])
k = k[:-1]
kmax = k[-1]
k_axis = np.hstack([k, k[::-1]])[:, None]
column, row = np.indices(f.shape)
row = row.astype(np.float)
column = column.astype(np.float)
column.fill(1.0)
row.fill(1.0)
row *= freq
column *= k_axis
m = row/column
m[:mid] = m[:mid][::-1]
m[mid:] = m[mid:][::-1]
mask = m > kwargs['fkVelocity']
m[mask] = 1
m[~mask] = 0
window = kwargs['fkSmooth']
vec= np.ones(window)/(window *1.0)
smoothed_m = np.apply_along_axis(lambda m: np.convolve(m, vec, mode='valid'), axis=-1, arr=m)
valid = smoothed_m.shape[-1]
m[:, :valid] = smoothed_m
pylab.figure()
pylab.imshow(m.T, aspect='auto', extent=[-1*kmax, kmax, freq[-1], freq[0]])
pylab.colorbar()
z = m.copy()
z[:mid] = z[:mid][::-1]
z[mid:] = z[mid:][::-1]
return z
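# The mask built above passes f-k samples whose apparent velocity f/k is faster
# than kwargs['fkVelocity'] and rejects the slower, steeply dipping energy
# (e.g. ground roll); the moving-average smoothing tapers the cut-off so the
# filter edge is gradual rather than abrupt.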
def fk_filter(dataset, **kwargs):
for s in np.unique(dataset['fldr']):
shot = dataset['trace'][dataset['fldr'] == s]
filter = kwargs['fkFilter']
nt = shot.shape[0]
delta = abs(nt - filter.shape[0])
if delta > 0:
shot = np.vstack([shot, np.zeros_like(shot[:delta])])
f = np.fft.rfft2(shot)
result = np.fft.irfft2(f*filter)[:nt]
dataset['trace'] [dataset['fldr'] == s]= 0.0
dataset['trace'] [dataset['fldr'] == s]= result
return dataset
def trim(dataset, **kwargs):
dataset['tstat'] = 0
model = kwargs['model']
cdps = np.unique(model['cdp'])
start, end = (kwargs['gate'] /kwargs['dt']).astype(np.int)
centre = kwargs['ns']/2
m = kwargs['maxshift']
for cdp in cdps:
gather = dataset[dataset['cdp'] == cdp].copy()
gather['trace'][:,:start] = 0
gather['trace'][:,end:] = 0
pilot = model['trace'][model['cdp'] == cdp].ravel()
pilot[:start] = 0
pilot[end:] = 0
result = np.apply_along_axis(lambda m: np.correlate(m, pilot, mode='same'), axis=-1, arr=gather['trace'])
result[:,:centre-m] = 0
result[:,centre+m+1:] = 0
peaks = np.argmax(np.abs(result), axis=-1)
dataset['tstat'][dataset['cdp'] == cdp] = peaks
dataset['tstat'] -= centre.astype(np.int16)
dataset['tstat'] *= -1
return dataset
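# trim() estimates residual ("trim") statics: every trace in a CDP gather is
# cross-correlated with the model/pilot trace over the picked gate, the peak
# lag (limited to +/- maxshift samples about the window centre) is stored,
# sign-flipped, in the tstat header, and apply_statics() can then be used to
# shift the traces into alignment.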
def xwigb(panel, key='offset'):
'''
looks like suxwigb
'''
axis = np.arange(panel['ns'][0])*panel['dt'][0]*1e-6
traces = panel['trace']
traces /= np.sqrt((traces ** 2).sum(1))[:,np.newaxis]
x, y = np.meshgrid(range(traces.shape[0]), range(traces.shape[1]))
traces += x.T
fig = pylab.figure()
for trace in traces:
pylab.plot(trace, axis,'k')
pylab.gca().invert_yaxis()
pylab.ylabel('Time(s)')
pylab.title('Trace')
pylab.gca().xaxis.tick_top()
pylab.show()
def ximage(data, agc=0):
'''
looks like suximage.
fix this to use the SU
headers for plotting
'''
if agc:
amp_func = agc_func(data=data,window=100)
data /= amp_func
fig = pylab.figure()
pylab.imshow(data.T, aspect='auto', vmin=-1, vmax=1, cmap='gist_yarg') #,
#extent=(min(panel['offset']), max(panel['offset']), panel['ns'][0]*(panel['dt'][0]*1e-6), 0))
pylab.xlabel('Offset')
pylab.ylabel('Time(s)')
pylab.show()
def agc_func(data, window):
vec = np.ones(window)/(window/2.)
func = np.apply_along_axis(lambda m: np.convolve(np.abs(m), vec, mode='same'), axis=1, arr=data)
print func
return func
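# agc_func is a simple automatic gain control: a running mean of |amplitude|
# over a sliding window, computed per trace; ximage() divides the data by it
# so that weak, late arrivals become visible in the display.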
| mit |
drammock/mne-python | mne/decoding/tests/test_transformer.py | 4 | 9210 | # Author: Mainak Jas <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pytest
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal)
from mne import io, read_events, Epochs, pick_types
from mne.decoding import (Scaler, FilterEstimator, PSDEstimator, Vectorizer,
UnsupervisedSpatialFilter, TemporalFilter)
from mne.defaults import DEFAULTS
from mne.utils import requires_sklearn, check_version
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
start, stop = 0, 8
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
@pytest.mark.parametrize('info, method', [
(True, None),
(True, dict(mag=5, grad=10, eeg=20)),
(False, 'mean'),
(False, 'median'),
])
def test_scaler(info, method):
"""Test methods of Scaler."""
raw = io.read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
y = epochs.events[:, -1]
epochs_data_t = epochs_data.transpose([1, 0, 2])
if method in ('mean', 'median'):
if not check_version('sklearn'):
with pytest.raises(ImportError, match='No module'):
Scaler(info, method)
return
if info:
info = epochs.info
scaler = Scaler(info, method)
X = scaler.fit_transform(epochs_data, y)
assert_equal(X.shape, epochs_data.shape)
if method is None or isinstance(method, dict):
sd = DEFAULTS['scalings'] if method is None else method
stds = np.zeros(len(picks))
for key in ('mag', 'grad'):
stds[pick_types(epochs.info, meg=key)] = 1. / sd[key]
stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg']
means = np.zeros(len(epochs.ch_names))
elif method == 'mean':
stds = np.array([np.std(ch_data) for ch_data in epochs_data_t])
means = np.array([np.mean(ch_data) for ch_data in epochs_data_t])
else: # median
percs = np.array([np.percentile(ch_data, [25, 50, 75])
for ch_data in epochs_data_t])
stds = percs[:, 2] - percs[:, 0]
means = percs[:, 1]
assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis],
epochs_data, rtol=1e-12, atol=1e-20, err_msg=method)
X2 = scaler.fit(epochs_data, y).transform(epochs_data)
assert_array_equal(X, X2)
# inverse_transform
Xi = scaler.inverse_transform(X)
assert_array_almost_equal(epochs_data, Xi)
# Test init exception
pytest.raises(ValueError, Scaler, None, None)
pytest.raises(TypeError, scaler.fit, epochs, y)
pytest.raises(TypeError, scaler.transform, epochs)
epochs_bad = Epochs(raw, events, event_id, 0, 0.01, baseline=None,
picks=np.arange(len(raw.ch_names))) # non-data chs
scaler = Scaler(epochs_bad.info, None)
pytest.raises(ValueError, scaler.fit, epochs_bad.get_data(), y)
def test_filterestimator():
"""Test methods of FilterEstimator."""
raw = io.read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
# Add tests for different combinations of l_freq and h_freq
filt = FilterEstimator(epochs.info, l_freq=40, h_freq=80)
y = epochs.events[:, -1]
X = filt.fit_transform(epochs_data, y)
assert (X.shape == epochs_data.shape)
assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)
filt = FilterEstimator(epochs.info, l_freq=None, h_freq=40,
filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto')
y = epochs.events[:, -1]
X = filt.fit_transform(epochs_data, y)
filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
y = epochs.events[:, -1]
with pytest.warns(RuntimeWarning, match='longer than the signal'):
pytest.raises(ValueError, filt.fit_transform, epochs_data, y)
filt = FilterEstimator(epochs.info, l_freq=40, h_freq=None,
filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto')
X = filt.fit_transform(epochs_data, y)
# Test init exception
pytest.raises(ValueError, filt.fit, epochs, y)
pytest.raises(ValueError, filt.transform, epochs)
def test_psdestimator():
"""Test methods of PSDEstimator."""
raw = io.read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
psd = PSDEstimator(2 * np.pi, 0, np.inf)
y = epochs.events[:, -1]
X = psd.fit_transform(epochs_data, y)
assert (X.shape[0] == epochs_data.shape[0])
assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X)
# Test init exception
pytest.raises(ValueError, psd.fit, epochs, y)
pytest.raises(ValueError, psd.transform, epochs)
def test_vectorizer():
"""Test Vectorizer."""
data = np.random.rand(150, 18, 6)
vect = Vectorizer()
result = vect.fit_transform(data)
assert_equal(result.ndim, 2)
# check inverse_trasnform
orig_data = vect.inverse_transform(result)
assert_equal(orig_data.ndim, 3)
assert_array_equal(orig_data, data)
assert_array_equal(vect.inverse_transform(result[1:]), data[1:])
# check with different shape
assert_equal(vect.fit_transform(np.random.rand(150, 18, 6, 3)).shape,
(150, 324))
assert_equal(vect.fit_transform(data[1:]).shape, (149, 108))
# check if raised errors are working correctly
vect.fit(np.random.rand(105, 12, 3))
pytest.raises(ValueError, vect.transform, np.random.rand(105, 12, 3, 1))
pytest.raises(ValueError, vect.inverse_transform,
np.random.rand(102, 12, 12))
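# mne.decoding.Vectorizer just collapses everything after the first (epoch)
# axis, e.g. (150, 18, 6) -> (150, 108), so MNE's 3-D data arrays can be fed to
# scikit-learn estimators expecting 2-D input; inverse_transform restores the
# original trailing shape.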
@requires_sklearn
def test_unsupervised_spatial_filter():
"""Test unsupervised spatial filter."""
from sklearn.decomposition import PCA
from sklearn.kernel_ridge import KernelRidge
raw = io.read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# Test estimator
pytest.raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))
# Test fit
X = epochs.get_data()
n_components = 4
usf = UnsupervisedSpatialFilter(PCA(n_components))
usf.fit(X)
usf1 = UnsupervisedSpatialFilter(PCA(n_components))
# test transform
assert_equal(usf.transform(X).ndim, 3)
# test fit_transform
assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X))
assert_equal(usf.transform(X).shape[1], n_components)
assert_array_almost_equal(usf.inverse_transform(usf.transform(X)), X)
# Test with average param
usf = UnsupervisedSpatialFilter(PCA(4), average=True)
usf.fit_transform(X)
pytest.raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
def test_temporal_filter():
"""Test methods of TemporalFilter."""
X = np.random.rand(5, 5, 1200)
# Test init test
values = (('10hz', None, 100., 'auto'), (5., '10hz', 100., 'auto'),
(10., 20., 5., 'auto'), (None, None, 100., '5hz'))
for low, high, sf, ltrans in values:
filt = TemporalFilter(low, high, sf, ltrans, fir_design='firwin')
pytest.raises(ValueError, filt.fit_transform, X)
# Add tests for different combinations of l_freq and h_freq
for low, high in ((5., 15.), (None, 15.), (5., None)):
filt = TemporalFilter(low, high, sfreq=100., fir_design='firwin')
Xt = filt.fit_transform(X)
assert_array_equal(filt.fit_transform(X), Xt)
assert (X.shape == Xt.shape)
# Test fit and transform numpy type check
with pytest.raises(ValueError, match='Data to be filtered must be'):
filt.transform([1, 2])
# Test with 2 dimensional data array
X = np.random.rand(101, 500)
filt = TemporalFilter(l_freq=25., h_freq=50., sfreq=1000.,
filter_length=150, fir_design='firwin2')
assert_equal(filt.fit_transform(X).shape, X.shape)
| bsd-3-clause |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 27 | 11083 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of `Estimator` API (deprecated).
This module is deprecated. Please use
`tf.contrib.factorization.KMeansClustering` instead of
`tf.contrib.learn.KMeansClustering`. It has a similar interface, but uses the
`tf.estimator.Estimator` API instead of `tf.contrib.learn.Estimator`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
_USE_TF_CONTRIB_FACTORIZATION = (
'Please use tf.contrib.factorization.KMeansClustering instead of'
' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes _LossRelativeChangeHook.
Args:
tolerance: A relative tolerance of change between iterations.
"""
self._tolerance = tolerance
self._prev_loss = None
def begin(self):
self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
KMeansClustering.LOSS_OP_NAME + ':0')
assert self._loss_tensor is not None
def before_run(self, run_context):
del run_context
return SessionRunArgs(
fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
def after_run(self, run_context, run_values):
loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
assert loss is not None
if self._prev_loss is not None:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
"""Model function for KMeansClustering estimator."""
assert labels is None, labels
(all_scores, model_predictions, losses,
is_initialized, init_op, training_op) = clustering_ops.KMeans(
_parse_tensor_or_dict(features),
params.get('num_clusters'),
initial_clusters=params.get('training_initial_clusters'),
distance_metric=params.get('distance_metric'),
use_mini_batch=params.get('use_mini_batch'),
mini_batch_steps_per_iteration=params.get(
'mini_batch_steps_per_iteration'),
random_seed=params.get('random_seed'),
kmeans_plus_plus_num_retries=params.get(
'kmeans_plus_plus_num_retries')).training_graph()
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
summary.scalar('loss/raw', loss)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0],
}
eval_metric_ops = {KMeansClustering.SCORES: loss}
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
relative_tolerance = params.get('relative_tolerance')
if relative_tolerance is not None:
training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
return ModelFnOps(
mode=mode,
predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss,
train_op=training_op,
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
LOSS_OP_NAME = 'kmeans_loss'
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy. See clustering_ops.py
for more details.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
config: See Estimator
"""
params = {}
params['num_clusters'] = num_clusters
params['training_initial_clusters'] = initial_clusters
params['distance_metric'] = distance_metric
params['random_seed'] = random_seed
params['use_mini_batch'] = use_mini_batch
params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
params['relative_tolerance'] = relative_tolerance
super(KMeansClustering, self).__init__(
model_fn=_kmeans_clustering_model_fn,
params=params,
model_dir=model_dir,
config=config)
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def predict_cluster_idx(self, input_fn=None):
"""Yields predicted cluster indices."""
key = KMeansClustering.CLUSTER_IDX
results = super(KMeansClustering, self).predict(
input_fn=input_fn, outputs=[key])
for result in results:
yield result[key]
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def score(self, input_fn=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(
input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def transform(self, input_fn=None, as_iterable=False):
"""Transforms each element to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
input_fn: see predict.
as_iterable: see predict
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
key = KMeansClustering.ALL_SCORES
results = super(KMeansClustering, self).predict(
input_fn=input_fn,
outputs=[key],
as_iterable=as_iterable)
if not as_iterable:
return results[key]
else:
return results
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def clusters(self):
"""Returns cluster centers."""
return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
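# Illustrative usage sketch. The random data and the `_demo_input_fn` helper
# below are hypothetical (they are not part of this module) and assume the
# deprecated `tf.contrib.learn` Estimator.fit(input_fn=...) interface used by
# this class:
#
#   import numpy as np
#   import tensorflow as tf
#
#   def _demo_input_fn():
#     points = np.random.rand(1000, 2).astype(np.float32)
#     return tf.constant(points), None
#
#   kmeans = KMeansClustering(num_clusters=5, use_mini_batch=False,
#                             relative_tolerance=1e-4)
#   kmeans.fit(input_fn=_demo_input_fn, steps=50)
#   centers = kmeans.clusters()
#   assignments = list(kmeans.predict_cluster_idx(input_fn=_demo_input_fn))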
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.20/_downloads/a5d4e64d0843ff17526c0588f9967f97/plot_covariance_whitening_dspm.py | 5 | 6810 | """
===================================================
Demonstrate impact of whitening on source estimates
===================================================
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in [1]_. The example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in [1]_.
This example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
.. warning:: Please do not copy the patterns presented here for your own
             analysis; this example is purely illustrative.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
##############################################################################
# Get data
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw = raw.crop(0, 150.).load_data()
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(None, 20.)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(mag=3e-12)
# Make forward
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
del src
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
clim = dict(kind='value', lims=[0, 2.5, 5])
###############################################################################
# Estimate covariances
samples_epochs = 5, 15,
method = 'empirical', 'shrunk'
colors = 'steelblue', 'red'
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
events_ = events_[np.argsort(events_[:, 0])]
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject,
decim=8)
epochs_train.equalize_event_counts(event_ids)
assert len(epochs_train) == 2 * n_train
# We know some of these have too few samples, so suppress warning
# with verbose='error'
noise_covs = compute_covariance(
epochs_train, method=method, tmin=None, tmax=0, # baseline only
return_estimators=True, rank=None, verbose='error') # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
del epochs_train, events_
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs:
inverse_operator = make_inverse_operator(evokeds[0].info, forward,
cov, loose=0.2, depth=0.8)
assert len(inverse_operator['sing']) == 274 # sanity check
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b
del raw, forward # save some memory
##############################################################################
# Show the resulting source estimates
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 5))
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim,
initial_time=0.175, background='w', foreground='k')
brain.show_view('ven')
im = brain.screenshot()
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set(title='{0} epochs'.format(n_train * 2),
xlabel='Time (ms)', ylabel='Source Activation (dSPM)',
xlim=(tmin * 1e3, tmax * 1e3), ylim=(-3, 3))
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.2, left=0.01, right=0.99, wspace=0.03)
| bsd-3-clause |
ua-snap/downscale | snap_scripts/epscor_sc/generate_raw_anoms_clims_cmip5_testing.py | 1 | 2516 | # WORK WITH MATT:
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray( lon )
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
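# quick illustration with hypothetical coordinates: for lat = [60, 61, 62] and
# lon = [0, 1, 2] the affine returned above maps (col, row) array indices to
# coordinates, e.g. affine * (0, 0) -> (0.0, 60.0) and affine * (2, 1) -> (2.0, 61.0)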
if __name__ == '__main__':
import rasterio, os
import xarray as xr
import numpy as np
import pandas as pd
# some id variables
variable = 'tas'
model = 'GFDL-CM3'
scenario = 'rcp60'
begin = '1961'
end = '1990'
output_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/matt_mike_test'
# read em
historical = xr.open_dataset( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/{}/{}/{}/{}_{}_{}_r1i1p1_1860_2005.nc'.format( model, 'historical', variable, variable, model, 'historical' ) )
future = xr.open_dataset( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/{}/{}/{}/{}_{}_{}_r1i1p1_2006_2100.nc'.format( model, scenario, variable, variable, model, scenario ) )
# concat em
ds = xr.concat( [historical, future], dim='time' )
# make climatology
climatology = ds[variable].sel( time=slice( begin, end ) ).groupby( 'time.month' ).mean( 'time' )
# make anomalies
anomalies = ds[variable].groupby( 'time.month' ) - climatology
# slice anomalies back to just the future
anomalies = anomalies.sel( time=future.time )
# make some metadata
count, height, width = anomalies.shape
affine = transform_from_latlon( ds.lat, ds.lon )
meta = { 'crs':{'init':'epsg:4326'},
'count':count,
'height':height,
'width':width,
'driver':'GTiff',
'dtype':'float64',
'affine':affine }
output_filename = os.path.join( output_path, '{}_{}_{}_multiband_anomalies.tif'.format( variable, model, scenario ) )
# write anom to disk
with rasterio.open( output_filename, 'w', **meta ) as rst:
rst.write( anomalies.values )
# write climatologies to disk
# make some metadata
count, height, width = climatology.shape
affine = transform_from_latlon( ds.lat, ds.lon )
meta = { 'crs':{'init':'epsg:4326'},
'count':count,
'height':height,
'width':width,
'driver':'GTiff',
'dtype':'float64',
'affine':affine }
output_filename = os.path.join( output_path, '{}_{}_{}_multiband_climatology.tif'.format( variable, model, scenario ) )
	# write climatology to disk
with rasterio.open( output_filename, 'w', **meta ) as rst:
rst.write( climatology.values )
| mit |
381426068/MissionPlanner | Lib/site-packages/scipy/signal/fir_filter_design.py | 53 | 18572 | """Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1.  These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
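# Worked example of the piecewise formula above: an attenuation of a = 65 dB
# falls in the a > 50 branch, so beta = 0.1102 * (65 - 8.7) ~= 6.20.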
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
    numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
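# Worked example with arbitrary numbers: numtaps = 160 and width = 0.05 give
# a = 2.285 * 159 * pi * 0.05 + 7.95 ~= 65 dB; this is (approximately) the
# inverse of the numtaps estimate returned by kaiserord below.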
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
        attenuation in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta :
The beta parameter for the kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
        raise ValueError("Requested maximum ripple attenuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
    >>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
        raise ValueError('numtaps must be less than nfreqs, but firwin2 was '
                         'called with numtaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
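if __name__ == '__main__':
    # Minimal demonstration sketch of how kaiserord and firwin fit together;
    # the ripple/width/cutoff values below are arbitrary illustration choices.
    numtaps, beta = kaiserord(ripple=65.0, width=0.05)
    taps = firwin(numtaps, cutoff=0.3, window=('kaiser', beta))
    print("Designed a %d-tap lowpass filter with Kaiser beta %.2f" % (numtaps, beta))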
| gpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/mpl_toolkits/axes_grid1/mpl_axes.py | 10 | 5045 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import matplotlib.axes as maxes
from matplotlib.artist import Artist
from matplotlib.axis import XAxis, YAxis
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
def toggle_axisline(self, b):
warnings.warn("toggle_axisline is not necessary and deprecated in axes_grid1")
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects(
[super(Axes.AxisDict, self).__getitem__(k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start is None and k.stop is None and k.step is None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
super(Axes, self).__init__(*kl, **kw)
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
self._axislines["bottom"] = SimpleAxisArtist(self.xaxis, 1, self.spines["bottom"])
self._axislines["top"] = SimpleAxisArtist(self.xaxis, 2, self.spines["top"])
self._axislines["left"] = SimpleAxisArtist(self.yaxis, 1, self.spines["left"])
self._axislines["right"] = SimpleAxisArtist(self.yaxis, 2, self.spines["right"])
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def cla(self):
super(Axes, self).cla()
self._init_axis_artists()
class SimpleAxisArtist(Artist):
def __init__(self, axis, axisnum, spine):
self._axis = axis
self._axisnum = axisnum
self.line = spine
if isinstance(axis, XAxis):
self._axis_direction = ["bottom", "top"][axisnum-1]
elif isinstance(axis, YAxis):
self._axis_direction = ["left", "right"][axisnum-1]
else:
            raise ValueError("axis must be an instance of XAxis or YAxis; %s was provided" % (axis,))
Artist.__init__(self)
def _get_major_ticks(self):
tickline = "tick%dline" % self._axisnum
return SimpleChainedObjects([getattr(tick, tickline) for tick \
in self._axis.get_major_ticks()])
def _get_major_ticklabels(self):
label = "label%d" % self._axisnum
return SimpleChainedObjects([getattr(tick, label) for tick \
in self._axis.get_major_ticks()])
def _get_label(self):
return self._axis.label
major_ticks = property(_get_major_ticks)
major_ticklabels = property(_get_major_ticklabels)
label = property(_get_label)
def set_visible(self, b):
self.toggle(all=b)
self.line.set_visible(b)
self._axis.set_visible(True)
Artist.set_visible(self, b)
def set_label(self, txt):
self._axis.set_label_text(txt)
def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
tickOn = "tick%dOn" % self._axisnum
labelOn = "label%dOn" % self._axisnum
if _ticks is not None:
tickparam = {tickOn: _ticks}
self._axis.set_tick_params(**tickparam)
if _ticklabels is not None:
tickparam = {labelOn: _ticklabels}
self._axis.set_tick_params(**tickparam)
if _label is not None:
pos = self._axis.get_label_position()
if (pos == self._axis_direction) and not _label:
self._axis.label.set_visible(False)
elif _label:
self._axis.label.set_visible(True)
self._axis.set_label_position(self._axis_direction)
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig = plt.figure()
ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.cla()
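    # The AxisDict / SimpleChainedObjects machinery above also supports chained
    # axis access; illustrative (not executed) examples:
    #   ax.axis["left"].major_ticklabels.set_color("b")
    #   ax.axis["top", "right"].set_visible(False)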
| gpl-3.0 |
hodlin/BDA_py_demos | demos_pystan/pystan_demo.py | 19 | 12220 | """Bayesian Data Analysis, 3rd ed
PyStan demo
Demo for using Stan with Python interface PyStan.
"""
import numpy as np
import pystan
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# ====== Bernoulli model =======================================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
for (n in 1:N)
y[n] ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[0,1,0,0,1,1,1,0,1,0])
fit = pystan.stan(model_code=bernoulli_code, data=data)
print(fit)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Vectorized Bernoulli model ============================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[1,1,1,0,1,1,1,0,1,1])
fit = pystan.stan(model_code=bernoulli_code, data=data)
# ====== Binomial model ========================================================
binomial_code = """
data {
int<lower=0> N;
int<lower=0> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ binomial(N,theta);
}
"""
data = dict(N=10, y=8)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Re-running Binomial model with new data ===============================
data = dict(N=10, y=10)
fit = pystan.stan(fit=fit, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Comparison of two groups with Binomial ================================
binomial_code = """
data {
int<lower=0> N1;
int<lower=0> y1;
int<lower=0> N2;
int<lower=0> y2;
}
parameters {
real<lower=0,upper=1> theta1;
real<lower=0,upper=1> theta2;
}
transformed parameters {
real oddsratio;
oddsratio <- (theta2/(1-theta2))/(theta1/(1-theta1));
}
model {
theta1 ~ beta(1,1);
theta2 ~ beta(1,1);
y1 ~ binomial(N1,theta1);
y2 ~ binomial(N2,theta2);
}
"""
data = dict(N1=674, y1=39, N2=680, y2=22)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['oddsratio'], 50)
plt.show()
# ====== Gaussian linear model =================================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N=N, x=x, y=y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with adjustable priors ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
real pmualpha; // prior mean for alpha
real psalpha; // prior std for alpha
real pmubeta; // prior mean for beta
real psbeta; // prior std for beta
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
alpha ~ normal(pmualpha,psalpha);
beta ~ normal(pmubeta,psbeta);
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
x = x,
y = y,
pmualpha = y.mean(), # Centered
psalpha = (14-4)/6.0, # avg temp between 4-14
pmubeta = 0, # a priori increase and decrese as likely
psbeta = (.1--.1)/6.0 # avg temp probably does not increase more than 1
# degree per 10 years
)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with standardized data ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
transformed data {
vector[N] x_std;
vector[N] y_std;
x_std <- (x - mean(x)) / sd(x);
y_std <- (y - mean(y)) / sd(y);
}
parameters {
real alpha;
real beta;
real<lower=0> sigma_std;
}
transformed parameters {
vector[N] mu_std;
mu_std <- alpha + beta*x_std;
}
model {
alpha ~ normal(0,1);
beta ~ normal(0,1);
y_std ~ normal(mu_std, sigma_std);
}
generated quantities {
vector[N] mu;
real<lower=0> sigma;
mu <- mean(y) + mu_std*sd(y);
sigma <- sigma_std*sd(y);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear student-t model =======================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
real<lower=1,upper=80> nu;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
nu ~ gamma(2,0.1); // Juarez and Steel (2010)
y ~ student_t(nu, mu, sigma);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,12))
plt.subplot(4,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(4,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(4,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.subplot(4,1,4)
plt.hist(samples['nu'], 50)
plt.xlabel('nu')
plt.tight_layout()
plt.show()
# ====== Comparison of k groups (ANOVA) ========================================
group_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there difference between different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=group_code, data=data)
# Analyse results
mu = fit.extract(permuted=True)['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
print("Matrix of probabilities that one mu is larger than other:")
print(ps)
# Plot
plt.boxplot(mu)
plt.show()
# ====== Hierarchical prior model for comparison of k groups (ANOVA) ===========
# results do not differ much from the previous, because there is only
# few groups and quite much data per group, but this works as an example anyway
hier_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
real mu0; // prior mean
real<lower=0> sigma0; // prior std
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
mu0 ~ normal(10,10); // weakly informative prior
sigma0 ~ cauchy(0,4); // weakly informative prior
mu ~ normal(mu0, sigma0); // population prior with unknown parameters
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there difference between different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=hier_code, data=data)
# Analyse results
samples = fit.extract(permuted=True)
print("std(mu0): {}".format(np.std(samples['mu0'])))
mu = samples['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
print("Matrix of probabilities that one mu is larger than other:")
print(ps)
# Plot
plt.boxplot(mu)
plt.show()
| gpl-3.0 |
musically-ut/statsmodels | statsmodels/graphics/factorplots.py | 28 | 7596 | # -*- coding: utf-8 -*-
"""
Authors: Josef Perktold, Skipper Seabold, Denis A. Engemann
"""
from statsmodels.compat.python import get_function_name, iterkeys, lrange, zip, iteritems
import numpy as np
from statsmodels.graphics.plottools import rainbow
import statsmodels.graphics.utils as utils
def interaction_plot(x, trace, response, func=np.mean, ax=None, plottype='b',
xlabel=None, ylabel=None, colors=[], markers=[],
linestyles=[], legendloc='best', legendtitle=None,
**kwargs):
"""
Interaction plot for factor level statistics.
    Note: if categorical factors are supplied, levels will be internally
    recoded to integers. This ensures matplotlib compatibility.
uses pandas.DataFrame to calculate an `aggregate` statistic for each
level of the factor or group given by `trace`.
Parameters
----------
x : array-like
The `x` factor levels constitute the x-axis. If a `pandas.Series` is
given its name will be used in `xlabel` if `xlabel` is None.
trace : array-like
The `trace` factor levels will be drawn as lines in the plot.
If `trace` is a `pandas.Series` its name will be used as the
`legendtitle` if `legendtitle` is None.
response : array-like
        The response or dependent variable. If a `pandas.Series` is given
its name will be used in `ylabel` if `ylabel` is None.
func : function
Anything accepted by `pandas.DataFrame.aggregate`. This is applied to
the response variable grouped by the trace levels.
plottype : str {'line', 'scatter', 'both'}, optional
The type of plot to return. Can be 'l', 's', or 'b'
ax : axes, optional
Matplotlib axes instance
xlabel : str, optional
Label to use for `x`. Default is 'X'. If `x` is a `pandas.Series` it
will use the series names.
ylabel : str, optional
Label to use for `response`. Default is 'func of response'. If
`response` is a `pandas.Series` it will use the series names.
colors : list, optional
If given, must have length == number of levels in trace.
linestyles : list, optional
If given, must have length == number of levels in trace.
markers : list, optional
        If given, must have length == number of levels in trace.
kwargs
These will be passed to the plot command used either plot or scatter.
If you want to control the overall plotting options, use kwargs.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> weight = np.random.randint(1,4,size=60)
>>> duration = np.random.randint(1,3,size=60)
>>> days = np.log(np.random.randint(1,30, size=60))
>>> fig = interaction_plot(weight, duration, days,
... colors=['red','blue'], markers=['D','^'], ms=10)
>>> import matplotlib.pyplot as plt
>>> plt.show()
.. plot::
import numpy as np
from statsmodels.graphics.factorplots import interaction_plot
np.random.seed(12345)
weight = np.random.randint(1,4,size=60)
duration = np.random.randint(1,3,size=60)
days = np.log(np.random.randint(1,30, size=60))
fig = interaction_plot(weight, duration, days,
colors=['red','blue'], markers=['D','^'], ms=10)
import matplotlib.pyplot as plt
#plt.show()
"""
from pandas import DataFrame
fig, ax = utils.create_mpl_ax(ax)
response_name = ylabel or getattr(response, 'name', 'response')
ylabel = '%s of %s' % (get_function_name(func), response_name)
xlabel = xlabel or getattr(x, 'name', 'X')
legendtitle = legendtitle or getattr(trace, 'name', 'Trace')
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
x_values = x_levels = None
if isinstance(x[0], str):
x_levels = [l for l in np.unique(x)]
x_values = lrange(len(x_levels))
x = _recode(x, dict(zip(x_levels, x_values)))
data = DataFrame(dict(x=x, trace=trace, response=response))
plot_data = data.groupby(['trace', 'x']).aggregate(func).reset_index()
# return data
# check plot args
n_trace = len(plot_data['trace'].unique())
if linestyles:
try:
assert len(linestyles) == n_trace
except AssertionError as err:
raise ValueError("Must be a linestyle for each trace level")
else: # set a default
linestyles = ['-'] * n_trace
if markers:
try:
assert len(markers) == n_trace
except AssertionError as err:
            raise ValueError("Must be a marker for each trace level")
else: # set a default
markers = ['.'] * n_trace
if colors:
try:
assert len(colors) == n_trace
except AssertionError as err:
            raise ValueError("Must be a color for each trace level")
else: # set a default
#TODO: how to get n_trace different colors?
colors = rainbow(n_trace)
if plottype == 'both' or plottype == 'b':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.plot(group['x'], group['response'], color=colors[i],
marker=markers[i], label=label,
linestyle=linestyles[i], **kwargs)
elif plottype == 'line' or plottype == 'l':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.plot(group['x'], group['response'], color=colors[i],
label=label, linestyle=linestyles[i], **kwargs)
elif plottype == 'scatter' or plottype == 's':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.scatter(group['x'], group['response'], color=colors[i],
label=label, marker=markers[i], **kwargs)
else:
raise ValueError("Plot type %s not understood" % plottype)
ax.legend(loc=legendloc, title=legendtitle)
ax.margins(.1)
if all([x_levels, x_values]):
ax.set_xticks(x_values)
ax.set_xticklabels(x_levels)
return fig
def _recode(x, levels):
    """ Recode categorical data to int factor.
Parameters
----------
x : array-like
        array-like object supporting numpy array methods, containing
        categorically coded data.
levels : dict
mapping of labels to integer-codings
Returns
-------
out : instance numpy.ndarray
"""
from pandas import Series
name = None
if isinstance(x, Series):
name = x.name
x = x.values
if x.dtype.type not in [np.str_, np.object_]:
        raise ValueError('This is not a categorical factor.'
' Array of str type required.')
elif not isinstance(levels, dict):
raise ValueError('This is not a valid value for levels.'
' Dict required.')
elif not (np.unique(x) == np.unique(list(iterkeys(levels)))).all():
raise ValueError('The levels do not match the array values.')
else:
out = np.empty(x.shape[0], dtype=np.int)
for level, coding in iteritems(levels):
out[x == level] = coding
if name:
out = Series(out)
out.name = name
return out
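# Example of the recoding performed above (hypothetical inputs):
#   _recode(np.array(['a', 'b', 'a']), {'a': 0, 'b': 1}) -> array([0, 1, 0])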
| bsd-3-clause |
puchchi/stock_scraper_latest | Indicators/OnBalanceVolume.py | 1 | 1267 | import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import pandas as pd
class kOnBalanceVolume():
def __init__(self):
        # nothing to initialize; just announce construction
        print("In kOBV class")
def calculate(self, dataFrame):
##### FORMULA #####
# If the closing price is above the prior close price then:
# Current OBV = Previous OBV + Current Volume
# If the closing price is below the prior close price then:
# Current OBV = Previous OBV - Current Volume
        # If the closing price equals the prior close price then:
        # Current OBV = Previous OBV (no change)
        # The first OBV entry will be 0.
OBV = pd.Series({dataFrame.index[0] : 0})
for i in range(1, dataFrame["Close"].count()):
if dataFrame["Close"][i] > dataFrame["Close"][i-1]:
OBV = OBV.append(pd.Series({dataFrame.index[i] : OBV[i-1] + dataFrame["Volume"][i]}))
elif dataFrame["Close"][i] < dataFrame["Close"][i-1]:
OBV = OBV.append(pd.Series({dataFrame.index[i] : OBV[i-1] - dataFrame["Volume"][i]}))
else:
OBV = OBV.append(pd.Series({dataFrame.index[i] : OBV[i-1]}))
        return OBV
| mit |
joh12041/quac | lib/geo/optimize.py | 1 | 8769 | # Copyright (c) Los Alamos National Security, LLC, and others.
from collections import defaultdict
import hashlib
import time
from django.contrib.gis import geos
import numpy as np
import scipy.optimize as scopt
import sklearn.mixture as skmix
import multicore
import testable
import u
l = u.l
class Weight(object):
'''Optimizes the token_weights of a gmm.Model to minimize error. Objective
function to be minimized is:
argmin_w \sum_i [ \sum_j (c_ij * s(m_j)) / \sum_j s(m_j) ]
where c_ij is the cost incurred by model m_j on tweet i, and
s(m_j) = 1 / (1 + e^{-w_j}) is the weight for token j.
By passing w_j through logistic function, no (0,1) constraints on w_j
needed.
gmms_list ........ list of lists of gmms, one list per tweet
errors_list ...... list of lists of errors, one list per tweet,
corresponding to each gmm in gmms_list
This test compares the analytical and empirical gradient of the
objective function. If the difference is small, we probably implemented
func and func_deriv correctly.
>>> from . import gmm
>>> gmm.Token.parms_init({})
>>> mp = geos.MultiPoint(geos.Point(1,2), geos.Point(3,4), srid=4326)
>>> m1 = gmm.Geo_GMM.from_fit(mp, 1, 'a')
>>> m2 = gmm.Geo_GMM.from_fit(mp, 2, 'b')
>>> m3 = gmm.Geo_GMM.from_fit(mp, 1, 'c')
>>> m = Weight([[m1, m2], [m2, m3], [m1, m3]],
... [[100, 50], [50, 200], [80, 400]], identity_feature=True,
... misc_feature=False)
>>> scopt.check_grad(m.func, m.func_deriv,
... np.ones(len(m.all_gmms)) / len(m.all_gmms)) < 0.0001
True
>>> tok_weights = m.optimize()
>>> tok_weights['b'] > tok_weights['a']
True
>>> tok_weights['b'] > tok_weights['c']
True
>>> m = Weight([[m1, m2], [m2, m3], [m1, m3]],
... [[100, 50], [50, 200], [80, 400]], regularizer=0.,
... identity_feature=True, misc_feature=False)
>>> scopt.check_grad(m.func, m.func_deriv,
... np.ones(len(m.all_gmms)) / len(m.all_gmms) ) < 0.0001
True
>>> tok_weights = m.optimize()'''
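# Gradient sketch (what func/func_deriv below implement): with token score
# s_j = logistic(w . phi_j), per-tweet weight sum S_i = sum_j s_j and weighted
# error E_i = sum_j s_j * c_ij, each tweet contributes E_i / S_i to the
# objective and
#   d(E_i/S_i)/dw = sum_j s_j*(1 - s_j) * (c_ij*S_i - E_i) / S_i**2 * phi_j,
# plus the ridge term `regularizer * w` added by reg()/reg_deriv().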
def __init__(self, gmms_list, errors_list, regularizer=1.0,
identity_feature=True, misc_feature=False, verbose=False,
init_by_feature='', min_value=1e-10):
self.min_value = min_value
self.init_by_feature = init_by_feature
self.gmms_list = gmms_list
self.errors_list = errors_list
self.all_gmms = self.make_gmm_list()
self.make_feature_vectors(identity_feature, misc_feature)
self.regularizer = regularizer
self.verbose = verbose
self.deriv = np.zeros(len(self.feature_alphabet))
self.n_fun_calls = 0
self.n_deriv_calls = 0
self.n_cache_hits = 0
# cached data
self.weight_sums = np.zeros(len(errors_list))
self.weight_error_sums = np.zeros(len(errors_list))
self.tweets = []
self.hash = 0
for (gmms,errors) in zip(gmms_list, errors_list):
self.tweets.append(list(zip(gmms, errors)))
def make_gmm_list(self):
return sorted(list(set([g for sublist in self.gmms_list
for g in sublist])), key=lambda gm:gm.tokens)
def make_feature_vectors(self, identity_feature, misc_feature):
'''Appends a sparse feature vector to each gmm. This also initializes
feature_alphabet'''
self.feature_alphabet = defaultdict(lambda: len(self.feature_alphabet))
for g in self.all_gmms:
g.feature_vector = defaultdict(lambda : 0)
for (f,v) in g.features(identity_feature, misc_feature).items():
g.feature_vector[self.feature_alphabet[f]] = v
def dot(self, feature_vector, x):
'Dot product of feature_vector (a dict) and x (dense array)'
return sum(x[fi] * v for (fi,v) in feature_vector.items())
def logistic(self, x):
return 1.0 / (1.0 + np.exp(-x))
def score_gmms(self, x):
'Score is 1 / (1 + exp(-dot(g.feature_vector, x)))'
for g in self.all_gmms:
g.score = self.logistic(self.dot(g.feature_vector, x))
# array modifications in place
def update_cache(self, x):
# Insane one-liner to get hash of a numpy array.
# This tells us whether the array has changed.
# FIXME: really need this? looks like func_deriv called exactly once per
# call to func.
h = int(hashlib.sha1(x.view(np.uint8)).hexdigest(), 16)
if h != self.hash:
self.hash = h
self.f_value = 0.
self.score_gmms(x)
for ti,tweet in enumerate(self.tweets):
self.weight_sums[ti] = 0.
self.weight_error_sums[ti] = 0.
for (gmm, error) in tweet:
self.weight_error_sums[ti] += gmm.score * error
self.weight_sums[ti] += gmm.score
if self.weight_sums[ti] != 0.:
self.f_value += self.weight_error_sums[ti] / self.weight_sums[ti]
self.f_value += self.reg(x)
else:
self.n_cache_hits += 1
def func(self, x):
self.n_fun_calls += 1
self.update_cache(x)
return self.f_value
def func_deriv(self, x):
self.n_deriv_calls += 1
self.update_cache(x)
self.deriv.fill(0.0)
for ti,tweet in enumerate(self.tweets):
for (gmm,error) in tweet:
entropy = (gmm.score
* (1.0 - gmm.score))
if self.weight_sums[ti] * self.weight_sums[ti] == 0:
part = 0.
else:
part = (entropy * (error * self.weight_sums[ti] -
self.weight_error_sums[ti]) /
(self.weight_sums[ti] * self.weight_sums[ti]))
for (feature_index,feature_value) in gmm.feature_vector.items():
self.deriv[feature_index] += part * feature_value
self.reg_deriv(x)
return self.deriv
def reg(self, x):
return self.regularizer * np.sum(x**2) / 2.0
def reg_deriv(self, x):
self.deriv += self.regularizer * x
def initialize_from_feature(self):
init_vals = np.ones(len(self.feature_alphabet))
for g in self.all_gmms:
f = next(iter(g.features(identity=True,misc=False).keys()))
features = g.features(identity=False,misc=True)
init_vals[self.feature_alphabet[f]] = \
1 / (1 + features[self.init_by_feature]) - 0.5
return init_vals
def initialize_random(self):
return np.array([u.rand.random() - 0.5 for
i in range(0, len(self.feature_alphabet))])
def optimize(self):
'Run optimization and return dictionary of token->weight'
if self.init_by_feature == '':
init_vals = self.initialize_random()
else:
init_vals = self.initialize_from_feature()
t_start = time.time()
l.debug('minimizing obj f\'n with %d weights...' %
len(self.feature_alphabet))
l.debug('initial function value=%g' % self.func(init_vals))
res = scopt.minimize(self.func, init_vals,
method='L-BFGS-B', jac=self.func_deriv,
options={'disp': self.verbose}, tol=1e-4)
l.debug('minimized in %s; %d f calls and %d f\' calls (%d cache hits)'
% (u.fmt_seconds(time.time() - t_start), self.n_fun_calls,
self.n_deriv_calls, self.n_cache_hits))
l.debug('final function value=%g' % self.func(res.x))
self.score_gmms(res.x)
di = dict([(next(iter(gmm.tokens.keys())),
max(self.min_value, gmm.score))
for gmm in self.all_gmms])
if self.verbose:
for (fv,fi) in self.feature_alphabet.items():
l.debug('feature weight %s=%g' % (fv,res.x[fi]))
for (t,w) in di.items():
l.debug('token weight %s=%s'%(t,str(w)))
# clean up
for g in self.all_gmms:
g.feature_vector = None
return di
# test that self.all_gmms has stable order
# disabled for now (see issue #100)
testable.manualonly_register('''
>>> import random
>>> from . import gmm
>>> def test_random():
... u.rand = random.Random(123)
... gmm.Token.parms_init({})
... mp = geos.MultiPoint(geos.Point(1,2), geos.Point(3,4), srid=4326)
... m1 = gmm.Geo_GMM.from_fit(mp, 1, 'a')
... m2 = gmm.Geo_GMM.from_fit(mp, 2, 'b')
... m3 = gmm.Geo_GMM.from_fit(mp, 1, 'c')
... m = Weight([[m1, m2], [m2, m3], [m1, m3]],
... [[100, 50], [50, 200], [80, 400]], identity_feature=True,
... misc_feature=False)
... return list(m.all_gmms)
>>> all((test_random()[0].tokens == test_random()[0].tokens for i in range(100)))
True
''')
| apache-2.0 |
AlexRobson/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
fabriziocosta/GraphLearn_examples | notebooks/Abstract/infeval.py | 1 | 4688 | from eden.util import configure_logging
import logging
configure_logging(logging.getLogger(),verbosity=1)
'''
GET RNA DATA
'''
from eden.converter.fasta import fasta_to_sequence
import itertools
from eden.util import random_bipartition_iter
import random
import numpy
def rfam_uri(family_id):
return 'http://rfam.xfam.org/family/%s/alignment?acc=%s&format=fastau&download=0'%(family_id,family_id)
def rfam_uri(family_id):
return '%s.fa'%(family_id)
RFAM="RF01725"
#cutoff 162 (44.0)
#cutoff 1725 (38.0)
#cutoff rest (29)
sizes=[50,100,200,400]
repeats=3
def get_sequences(size=9999,rand=False):
sequences = get_sequences_with_names(size=size,rand=rand)
return [ b for (a,b) in sequences ]
def get_sequences_with_names(size=9999, rand=0):
if rand>0:
sequences , boring = random_bipartition_iter(fasta_to_sequence("../toolsdata/%s.fa" % RFAM),.9,random_state=random.random()*rand)
sequences = itertools.islice( sequences , size)
else:
sequences = itertools.islice( fasta_to_sequence("../toolsdata/%s.fa" % RFAM), size)
return sequences
import random
import graphlearn.abstract_graphs.RNA as rna
from graphlearn.feasibility import FeasibilityChecker as Checker
from graphlearn.estimator import Wrapper as estimatorwrapper
import graphlearn.utils.draw as draw
from graphlearn.graphlearn import Sampler as GLS
import itertools
def fit_sample(graphs, random_state=random.random()):
'''
graphs -> more graphs: fit a grammar and estimator on the input graphs, then sample new graphs from them.
'''
graphs = list(graphs)
estimator=estimatorwrapper( nu=.33, cv=2, n_jobs=-1)
sampler=rna.AbstractSampler(radius_list=[0,1],
thickness_list=[2],
min_cip_count=1,
min_interface_count=2,
preprocessor=rna.PreProcessor(base_thickness_list=[1],ignore_inserts=True),
postprocessor=rna.PostProcessor(),
estimator=estimator
#feasibility_checker=feasibility
)
sampler.fit(graphs,grammar_n_jobs=4,grammar_batch_size=1)
#logger.info('graph grammar stats:')
dataset_size, interface_counts, core_counts, cip_counts = sampler.grammar().size()
#logger.info('#instances:%d #interfaces: %d #cores: %d #core-interface-pairs: %d' % (dataset_size, interface_counts, core_counts, cip_counts))
graphs = [ b for a ,b in graphs ]
graphs = sampler.sample(graphs,
n_samples=3,
batch_size=1,
n_steps=50,
n_jobs=4,
quick_skip_orig_cip=True,
probabilistic_core_choice=True,
burnin=10,
improving_threshold=0.9,
improving_linear_start=0.3,
max_size_diff=20,
accept_min_similarity=0.65,
select_cip_max_tries=30,
keep_duplicates=False,
include_seed=False,
backtrack=10,
monitor=False)
result=[]
for graphlist in graphs:
result+=graphlist
# note that this is a list [('',sequ),..]
return result
def eval(repeats,size):
result=[]
for i in range(repeats):
graphs=get_sequences_with_names(size=size, rand=(i+3)*10)
zz=fit_sample(graphs)
z=[b for a ,b in zz]
cmpath='../%s.cm' % RFAM
result+=rna.infernal_checker(z,cmfile=cmpath, cmsearchbinarypath='../toolsdata/cmsearch')
a = numpy.array(result)
mean = numpy.mean(a, axis=0)
std = numpy.std(a, axis=0)
print 'size:%d mean:%f std:%f' % (size,mean,std)
return mean,std
'''
import numpy as np
import matplotlib.pyplot as plt
def make_bar_plot(labels=('G1', 'G2', 'G3', 'G4', 'G5'),means=(20, 35, 30, 35, 27),stds=(2, 3, 4, 1, 2)):
N = len(labels)
ind = np.arange(N)
width = .5 #0.35
p1 = plt.bar(ind, means, width, color='r', yerr=stds)
plt.ylabel('Scores')
plt.title('Scores by training size')
plt.xticks(ind + width/2, labels )
plt.yticks(np.arange(0, 100, 10))
plt.show()
'''
means=[]
stds=[]
for size in sizes:
m,s = eval(repeats,size)
means.append(m)
stds.append(s)
print 'size: '+str(sizes)
print 'means: '+str(means)
print 'stds: '+ str(stds)
#make_bar_plot(sizes,means,stds)
| gpl-2.0 |
simvisage/oricreate | docs/howtos/ex08_rigid_facets/sim014_single_fold_quad_nonsym_psi_cntl.py | 1 | 3965 | r'''
Fold control using dihedral angle with quadrilateral facets
-----------------------------------------------------------
This example shows the folding process controlled by a dihedral
angle between two facets. In contrast to the previous
example, this one uses nonsymmetric arrangment of triangles
to compose quadrilateral facets. It represents a Miura-Ori
vertex with all associated kinematic constraints.
'''
import numpy as np
from oricreate.api import \
SimulationTask, SimulationConfig, \
GuConstantLength, GuDofConstraints, GuPsiConstraints, fix, \
FTV, FTA
def create_cp_factory():
# begin
from oricreate.api import CreasePatternState, CustomCPFactory
x = np.array([[-1, 0, 0],
[0, 0, 0],
[1, 1, 0],
[2, 0, 0],
[1, -1, 0],
[-1, 1, 0],
[-1, -1, 0],
[2, 1, 0],
[2, -1, 0],
], dtype='float_')
L = np.array([[0, 1], [1, 2], [2, 0],
[1, 3], [1, 7],
[1, 4], [3, 4],
#[1, 5],
[6, 1],
[0, 5], [2, 5],
[0, 6], [4, 6],
[3, 7], [2, 7],
[3, 8], [4, 8]
],
dtype='int_')
F = np.array([[0, 1, 2],
[1, 7, 2],
[1, 4, 3],
[1, 4, 6],
[0, 2, 5],
[0, 1, 6],
[3, 1, 7],
[3, 4, 8],
], dtype='int_')
cp = CreasePatternState(X=x,
L=L,
F=F
)
cp_factory = CustomCPFactory(formed_object=cp)
# end
return cp_factory
if __name__ == '__main__':
cp_factory_task = create_cp_factory()
cp = cp_factory_task.formed_object
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cp.plot_mpl(ax, facets=True)
plt.tight_layout()
plt.show()
# Link the crease factory with the constraint client
gu_constant_length = GuConstantLength()
psi_max = np.pi * .49
gu_psi_constraints = \
GuPsiConstraints(forming_task=cp_factory_task,
psi_constraints=[([(2, 1.0)], 0.0),
([(7, 1.0)], 0.0),
([(4, 1.0)], 0.0),
([(6, 1.0)], 0.0),
([(3, 1.0)], lambda t: -psi_max * t),
])
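# The four zero constraints above keep the diagonal creases (lines 2, 7, 4, 6)
# flat, so each pair of triangles behaves as one rigid quadrilateral facet;
# the time ramp on line 3 then drives that fold's dihedral angle linearly
# from 0 to -psi_max as the simulation pseudo-time t goes from 0 to 1.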
dof_constraints = fix([0], [1]) + fix([1], [0, 1, 2]) \
+ fix([2, 4], [2])
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
sim_config = SimulationConfig(goal_function_type='none',
gu={'cl': gu_constant_length,
'u': gu_dof_constraints,
'psi': gu_psi_constraints},
acc=1e-8, MAX_ITER=100)
sim_task = SimulationTask(previous_task=cp_factory_task,
config=sim_config,
n_steps=25)
cp.u[(0, 3), 2] = -0.1
cp.u[(1), 2] = 0.1
sim_task.u_1
cp = sim_task.formed_object
ftv = FTV()
ftv.add(sim_task.sim_history.viz3d_dict['node_numbers'], order=5)
ftv.add(sim_task.sim_history.viz3d)
ftv.add(gu_dof_constraints.viz3d)
fta = FTA(ftv=ftv)
fta.init_view(a=200, e=35, d=5, f=(0, 0, 0), r=0)
fta.add_cam_move(a=200, e=34, n=5, d=5, r=0,
duration=10,
vot_fn=lambda cmt: np.linspace(0, 1, 4),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.plot()
fta.render()
fta.configure_traits()
| gpl-3.0 |
JudoWill/ResearchNotebooks | SadiVariation.py | 1 | 9173 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os, os.path
import sys
import pandas as pd
import numpy as np
os.chdir('/home/will/SadiVariation/')
sys.path.append('/home/will/PySeqUtils/')
# <codecell>
from GeneralSeqTools import fasta_reader, fasta_writer, WebPSSM_V3_series
import glob
# <codecell>
files = [('x4_seqs.fasta.old', 'x4_seqs.fasta'),
('r5_seqs.fasta.old', 'r5_seqs.fasta')]
for ifile, ofile in files:
with open(ifile) as handle:
with open(ofile, 'w') as ohandle:
for name, seq in fasta_reader(handle):
fasta_writer(ohandle, [(name, seq[1:-1])])
# <codecell>
subtype_files = glob.glob('/home/will/WLAHDB_data/SubtypeGuess/*.gb')
subtypes = []
for f in subtype_files:
gb = f.rsplit(os.sep, 1)[-1].split('.')[0]
with open(f) as handle:
subtype = handle.next().strip()
if subtype != 'Unk':
subtypes.append((int(gb), subtype))
subtype_df = pd.DataFrame(subtypes, columns = ['GI', 'Subtype'])
subtype_ser = subtype_df.groupby('GI')['Subtype'].first()
# <codecell>
with open('hxb2.needle') as handle:
aligned_seqs = list(fasta_reader(handle))
# <codecell>
from scipy.stats import linregress
hxb2_inds = [350, 364, 375, 388, 398, 456]
our_inds = [-103, -99, -85, -74, -63, 0]
m, b, _, _, _ = linregress(hxb2_inds, our_inds)
new_our_inds = np.arange(0, 634)
new_hxb2_inds = np.ceil(m*new_our_inds+b)
# <codecell>
starts = range(0, len(aligned_seqs), 2)
aligned = []
for s in starts:
_, hxb2_seq = aligned_seqs[s]
gi, gi_seq = aligned_seqs[s+1]
aseq = ''.join(q for q, r in zip(gi_seq, hxb2_seq) if r.isalpha())
aligned.append((int(gi), np.array(list(aseq))))
# <codecell>
aligned_ser = pd.DataFrame(aligned, columns=['GI', 'alignment']).groupby('GI').first()['alignment']
subs, _ = subtype_ser.align(aligned_ser, join='right')
# <codecell>
subs.value_counts()
# <codecell>
aset = set(gi for gi, _ in aligned)
with open('/home/will/WLAHDB_data/SeqDump/B_v3.fasta') as handle:
v3_seqs = []
for gi, seq in fasta_reader(handle):
if int(gi) in aset:
v3_seqs.append((int(gi), seq))
print len(v3_seqs)
# <codecell>
trop_dict = dict((int(gi), 'X4' if trop > 0.5 else 'R5') for gi, trop in WebPSSM_V3_series(v3_seqs))
v3_ser = pd.Series(trop_dict)
trop_data, _ = v3_ser.align(aligned_ser, join='right')
# <codecell>
trop_data.value_counts()
# <codecell>
lanl_data = pd.read_csv('/home/will/HIVTropism/R5Cluster/LANLResults.tsv', sep = '\t')
# <codecell>
wtissue, _ = lanl_data['NewSimpleTissue'].dropna().align(aligned_ser, join = 'right')
wcor_receptor, _ = lanl_data['Coreceptor'].dropna().align(aligned_ser, join = 'right')
# <codecell>
check_seqs = [('CEBP-US2', 'ATTTCATCA', -170, -162),
('ATF-CREB', 'CTGACATCG', -123, -115),
('CEBP-US1', 'AGCTTTCTACAA', -114, -103),
('NFKB-II', 'AGGGACTTTCC', -103, -93),
('NFKB-I', 'GGGGACTTTCC', -99, -89),
('SP-III', 'GAGGCGTGG', -85, -77),
('SP-II', 'TGGGCGGGA', -74, -66),
('SP-I', 'GGGGAGTGG', -63, -55),
('AP1-I', 'TTGAGTGCT', 85, 93),
('AP1-II', 'TGTTGTGTGAC', 121, 138),
('AP1-III', 'TTTAGTCAG', 153, 161),
('DS3-B', 'TCAGTGTGGAAAATC', 158, 175),
('DS3-C', 'GTAGTGTGGAAAATC', 158, 175),
('DS3-D', 'TCAGTGTGGAAAATC', 158, 175),
('DS3-A', 'ACTGTGTAAAAATC', 158, 175)
]
slop = 20
check_dict = {'Subs':subs, 'tissue': wtissue, 'Coreceptor': trop_data}
for name, seq, start, stop in check_seqs:
mask = (new_hxb2_inds>(start-slop)) & (new_hxb2_inds<(stop+slop))
extracted = aligned_ser.map(lambda x: ''.join(x[mask]).replace('-', ''))
check_dict[name] = extracted.map(lambda x: seq in x).map(float)
check_dict[name][extracted.map(len)==0] = np.nan
df = pd.DataFrame(check_dict)
# <codecell>
# <codecell>
print pd.pivot_table(df, rows = 'Subs', aggfunc='mean').T*100
# <codecell>
print 9.0/290
# <codecell>
print pd.pivot_table(df, rows = 'Coreceptor', aggfunc='mean').T*100
# <codecell>
def find_best(tf_seq, ltr_seq):
width = len(tf_seq)
scores = []
for start in range(0, len(ltr_seq) - width + 1):  # include the final window
scores.append(sum(s == t for s, t in zip(tf_seq, ltr_seq[start:(start+width)])))
if scores:
return max(scores)/float(width)
else:
return np.nan
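# Sketch of the value find_best returns (hypothetical sequences): the best
# fraction of matching positions over all alignment windows, e.g.
#   find_best('TCA', 'GGTCAGG') -> 1.0  (exact hit somewhere in the window)
#   find_best('TCA', 'GGGGGGG') -> 0.0  (no position ever matches)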
best_dict = {'Subs':subs, 'tissue': wtissue, 'Coreceptor': trop_data}
for name, seq, start, stop in check_seqs:
print name
mask = (new_hxb2_inds>(start-slop)) & (new_hxb2_inds<(stop+slop))
extracted = aligned_ser.map(lambda x: ''.join(x[mask]).replace('-', ''))
best_dict[name] = extracted.map(lambda x: find_best(seq, x))
bdf = pd.DataFrame(best_dict)
# <codecell>
print (1-pd.pivot_table(bdf, rows = 'Subs')).T*100
# <codecell>
print (1-pd.pivot_table(bdf, rows = 'Coreceptor')).T*100
# <codecell>
slop=5
mask = (new_hxb2_inds>(159-slop)) & (new_hxb2_inds<(174+slop))
ds3_ser = aligned_ser.map(lambda x: ''.join(x[mask]).replace('-', ''))
ds3_ser[ds3_ser.map(len)==0] = np.nan
with open('r5_seqs.fasta', 'w') as handle:
fasta_writer(handle, ds3_ser[trop_data == 'R5'].dropna().to_dict().items())
with open('x4_seqs.fasta', 'w') as handle:
fasta_writer(handle, ds3_ser[trop_data == 'X4'].dropna().to_dict().items())
with open('subC_seqs.fasta', 'w') as handle:
fasta_writer(handle, ds3_ser[subs == 'C'].dropna().to_dict().items())
#print ds3_ser[trop_data == 'X4'].dropna()
# <codecell>
quick_seqs = []
with open('r5_seqs.fasta') as handle:
for name, seq in fasta_reader(handle):
quick_seqs.append({
'Name':name,
'Trop':'R5',
'Seq':seq
})
with open('x4_seqs.fasta') as handle:
for name, seq in fasta_reader(handle):
quick_seqs.append({
'Name':name,
'Trop':'X4',
'Seq':seq
})
# <codecell>
from Bio.Seq import Seq
from Bio import Motif
from StringIO import StringIO
from itertools import groupby
from operator import methodcaller
from Bio.Alphabet import IUPAC
def yield_motifs():
with open('/home/will/LTRtfAnalysis/Jaspar_PWMs.txt') as handle:
for key, lines in groupby(handle, methodcaller('startswith', '>')):
if key:
name = lines.next().strip().split()[-1].lower()
else:
tmp = ''.join(lines)
mot = Motif.read(StringIO(tmp), 'jaspar-pfm')
yield name, mot
yield name+'-R', mot.reverse_complement()
pwm_dict = {}
for num, (name, mot) in enumerate(yield_motifs()):
if num % 100 == 0:
print num
pwm_dict[name] = mot
# <codecell>
from functools import partial
from scipy.stats import ttest_ind, gaussian_kde, chi2_contingency
def score_seq(mot, seq):
bseq = Seq(seq, alphabet=IUPAC.unambiguous_dna)
scores = mot.scanPWM(bseq)
return np.max(scores)
def make_cdfs(kde, points):
cdf = []
for point in points:
cdf.append(kde.integrate_box_1d(-np.inf, point))
return 1-np.array(cdf)
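# make_cdfs therefore returns the KDE-based survival function 1 - CDF(x), i.e.
# the fraction of sequences whose TF score exceeds each threshold in `points`.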
wanted_mots = ['cebpa-R',
#'nfatc2',
'nfatc2-R']
fig, axs = plt.subplots(2,1, sharex=True, figsize = (10, 5))
quick_seqs_df = pd.DataFrame(quick_seqs)
r5_mask = quick_seqs_df['Trop'] == 'R5'
x4_mask = quick_seqs_df['Trop'] == 'X4'
for ax, mot in zip(axs.flatten(), wanted_mots):
quick_seqs_df[mot] = quick_seqs_df['Seq'].map(partial(score_seq, pwm_dict[mot]))
r5_vals = quick_seqs_df[mot][r5_mask].dropna().values
x4_vals = quick_seqs_df[mot][x4_mask].dropna().values
r5_kde = gaussian_kde(r5_vals)
x4_kde = gaussian_kde(x4_vals)
points = np.linspace(0, 15)
ax.plot(points, make_cdfs(r5_kde, points), 'b', label = 'R5')
ax.plot(points, make_cdfs(x4_kde, points), 'g', label = 'X4')
ax.set_title(mot)
if ax.is_last_row():
ax.set_xlabel('TF Score')
else:
ax.legend()
ax.set_ylabel('Frac Sequences')
thresh = Motif.Thresholds.ScoreDistribution(pwm_dict[mot], precision = 100).threshold_fpr(0.005)
ax.vlines(thresh, 0, 1)
ch2table = [[(r5_vals>thresh).sum(), (r5_vals<=thresh).sum()],
[(x4_vals>thresh).sum(), (x4_vals<=thresh).sum()],]
_, pval, _, _ = chi2_contingency(ch2table)
print mot, np.mean(r5_vals), np.mean(x4_vals), pval
plt.tight_layout()
#plt.savefig('/home/will/Dropbox/Wigdahl HIV Lab/SadiTFFigure/TFscores.png', dpi = 300)
# <codecell>
ax.vlines?
# <codecell>
from sklearn.svm import OneClassSVM
data = np.array([2,5,30,4,8,3,5,4,2,5,3,4,5,4]).reshape((-1, 1))
tmp = OneClassSVM().fit(data).predict(data)
data[tmp>0]
# <codecell>
count = 0
for num, f in enumerate(glob.glob('/home/will/WLAHDB_data/RegionSplit/ltr/*.fasta')):
if num % 5000 == 0:
print num, count
with open(f) as handle:
if len(handle.read()) > 10:
count += 1
# <codecell>
| mit |
arjoly/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
cls_names : list of estimator class names that generated the runtimes
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : nber of training instances (int)
n_test : nber of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/utils/tests/test_validation.py | 6 | 21478 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from sklearn.utils.testing import assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import SkipTest
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted,
check_consistent_length,
assert_all_finite,
)
import sklearn
from sklearn.exceptions import NotFittedError
from sklearn.exceptions import DataConversionWarning
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
X2 = as_float_array(X, copy=False)
assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
assert_equal(X2.dtype, np.float64)
# Test int dtypes <= 32bit
tested_dtypes = [np.bool,
np.int8, np.int16, np.int32,
np.uint8, np.uint16, np.uint32]
for dtype in tested_dtypes:
X = X.astype(dtype)
X2 = as_float_array(X)
assert_equal(X2.dtype, np.float32)
# Test object dtype
X = X.astype(object)
X2 = as_float_array(X, copy=True)
assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d=False
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# ensure_2d=True
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
check_array, [0, 1, 2], ensure_2d=True)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_on_mock_dataframe():
arr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
mock_df = MockDataFrame(arr)
checked_arr = check_array(mock_df)
assert_equal(checked_arr.dtype,
arr.dtype)
checked_arr = check_array(mock_df, dtype=np.float32)
assert_equal(checked_arr.dtype, np.dtype(np.float32))
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_accept_sparse_type_exception():
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
invalid_type = SVR()
msg = ("A sparse matrix was passed, but dense data is required. "
"Use X.toarray() to convert to a dense numpy array.")
assert_raise_message(TypeError, msg,
check_array, X_csr, accept_sparse=False)
assert_raise_message(TypeError, msg,
check_array, X_csr, accept_sparse=None)
msg = ("Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided 'accept_sparse={}'.")
assert_raise_message(ValueError, msg.format(invalid_type),
check_array, X_csr, accept_sparse=invalid_type)
msg = ("When providing 'accept_sparse' as a tuple or list, "
"it must contain at least one string value.")
assert_raise_message(ValueError, msg.format([]),
check_array, X_csr, accept_sparse=[])
assert_raise_message(ValueError, msg.format(()),
check_array, X_csr, accept_sparse=())
assert_raise_message(TypeError, "SVR",
check_array, X_csr, accept_sparse=[invalid_type])
# Test deprecation of 'None'
assert_warns(DeprecationWarning, check_array, X, accept_sparse=None)
def test_check_array_accept_sparse_no_exception():
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
check_array(X_csr, accept_sparse=True)
check_array(X_csr, accept_sparse='csr')
check_array(X_csr, accept_sparse=['csr'])
check_array(X_csr, accept_sparse=('csr',))
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
def test_check_dataframe_fit_attribute():
# check pandas dataframe with 'fit' column does not raise error
# https://github.com/scikit-learn/scikit-learn/issues/8415
try:
import pandas as pd
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X, columns=['a', 'b', 'fit'])
check_consistent_length(X_df)
except ImportError:
raise SkipTest("Pandas not found")
def test_suppress_validation():
X = np.array([0, np.inf])
assert_raises(ValueError, assert_all_finite, X)
sklearn.set_config(assume_finite=True)
assert_all_finite(X)
sklearn.set_config(assume_finite=False)
assert_raises(ValueError, assert_all_finite, X)
| bsd-3-clause |
lenovor/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
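# Rough storage arithmetic behind the quantization (sketch): with a palette of
# n_colors <= 256, each pixel can be stored as a single 1-byte index plus a
# small palette table, versus 3 bytes of raw RGB per pixel -- roughly a 3x
# reduction, as noted in the module docstring.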
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature ellimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
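# Rough arithmetic behind the three cases above (explanatory sketch, assuming
# the usual conversion of a fractional step to int(max(1, step * n_features))):
# step=0.01 gives int(max(1, 0.1)) = 1 feature eliminated per iteration,
# step=0.20 gives int(max(1, 2.0)) = 2 per iteration, and step=5 removes 5 at
# once; with n_features_to_select left at its default, all three stop once
# n_features // 2 == 5 features remain, which is what the assertions check.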
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
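# Quick sanity check of the two formulas (illustrative only): with
# n_features=11, n_features_to_select=3 and step=2, formula1 gives
# 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and formula2 gives
# 1 + ceil((11 - 3) / 2.0) = 1 + 4 = 5, so the two expressions agree.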
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
nmartensen/pandas | pandas/core/groupby.py | 1 | 148840 | import types
from functools import wraps
import numpy as np
import datetime
import collections
import warnings
import copy
from textwrap import dedent
from pandas.compat import (
zip, range, lzip,
callable, map
)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat import set_function_name
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype,
is_interval_dtype,
is_datetimelike,
is_datetime64_any_dtype,
is_bool, is_integer_dtype,
is_complex_dtype,
is_bool_dtype,
is_scalar,
is_list_like,
needs_i8_conversion,
_ensure_float64,
_ensure_platform_int,
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.missing import isna, notna, _maybe_fill
from pandas.core.common import (_values_from_object, AbstractMethodError,
_default_index)
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import (Index, MultiIndex,
CategoricalIndex, _ensure_index)
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.core.sorting import (get_group_index_sorter, get_group_index,
compress_group_index, get_flattened_iterator,
decons_obs_group_ids, get_indexer_dict)
from pandas.util._decorators import (cache_readonly, Substitution,
Appender, make_signature)
from pandas.io.formats.printing import pprint_thing
from pandas.util._validators import validate_kwargs
import pandas.core.algorithms as algorithms
import pandas.core.common as com
from pandas.core.config import option_context
from pandas.plotting._core import boxplot_frame_groupby
from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT
from pandas._libs.lib import count_level_2d
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if f returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumcount', 'ngroup',
'resample',
'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = ((_common_apply_whitelist |
{'nlargest', 'nsmallest'}) -
{'boxplot'}) | frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = ((_common_apply_whitelist |
frozenset(['dtypes', 'corrwith'])) -
{'boxplot'})
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target
object
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
These are local specifications and will override 'global' settings,
that is the parameters axis and level which are passed to the groupby
itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : the subject object
sort : bool, default False
whether the resulting grouper should be sorted
"""
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level),
name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis,
convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
""" default to the standard binner here """
group_axis = obj._get_axis(self.axis)
return Grouping(group_axis, None, obj=obj, name=self.key,
level=self.level, sort=self.sort, in_axis=False)
@property
def groups(self):
return self.grouper.groups
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple([f(n) for f, n in zip(converters, name)])
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection. Used for methods needing to return info on
each group regardless of whether a group selection was previously set.
"""
if self._group_selection is not None:
self._group_selection = None
# GH12839 clear cached selection too when changing group selection
self._reset_cache('_selected_obj')
def _set_group_selection(self):
"""
Create group based selection. Used when selection is not passed
directly but instead via a grouper.
"""
grp = self.grouper
if self.as_index and getattr(grp, 'groupings', None) is not None and \
self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
self._group_selection = ax.difference(Index(groupers)).tolist()
# GH12839 clear selected obj cache when group selection changes
self._reset_cache('_selected_obj')
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,
inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to set up the selection,
# as it is not passed directly but via the grouper
self._set_group_selection()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis or \
kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise
# ValueError
# if we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name,
*args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Substitution(name='groupby')
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform"""
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all='ignore'):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Note
----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original._get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj, numeric_only=False):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
if numeric_only is True, then only try to cast numerics
and not datetimelikes
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def _cython_transform(self, how, numeric_only=True):
output = collections.OrderedDict()
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except NotImplementedError:
continue
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, alt=None, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj, numeric_only=True)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = _ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in values:
if v is not None:
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
# this is a very unfortunate situation
# we have a multi-index that is NOT lexsorted
# and we have a result which is duplicated
# we can't reindex, so we resort to this
# GH 14776
if isinstance(ax, MultiIndex) and not ax.is_unique:
indexer = algorithms.unique1d(
result.index.get_indexer_for(ax.values))
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, '_selection_name', None) is not None):
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self, **kwargs):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
return self._cython_agg_general('var', **kwargs)
else:
self._set_group_selection()
f = lambda x: x.var(ddof=ddof, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
result = self.grouper.size()
if isinstance(self.obj, Series):
result.name = getattr(self.obj, 'name', None)
return result
@classmethod
def _add_numeric_operations(cls):
""" add numeric operations to the GroupBy generically """
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum)
cls.prod = groupby_function('prod', 'prod', np.prod)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False, _convert=True)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False, _convert=True)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
Compute open, high, low and close values within each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Appender(DataFrame.describe.__doc__)
def describe(self, **kwargs):
self._set_group_selection()
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.ffill(limit=limit))
ffill = pad
@Substitution(name='groupby')
@Appender(_doc_template)
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.bfill(limit=limit))
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame);
this is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying ``dropna`` allows count ignoring NaN
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote group exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying ``as_index=False`` in ``groupby`` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if dropna not in ['any', 'all']:
if isinstance(self._selected_obj, Series) and dropna is True:
warnings.warn("the dropna='%s' keyword is deprecated,"
"use dropna='all' instead. "
"For a Series groupby, dropna must be "
"either None, 'any' or 'all'." % (dropna),
FutureWarning,
stacklevel=2)
dropna = 'all'
else:
# Note: when agg-ing picker doesn't raise this,
# just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
@Appender(_doc_template)
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
.. versionadded:: 0.20.2
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
See also
--------
.cumcount : Number the rows in each group.
"""
self._set_group_selection()
index = self._selected_obj.index
result = Series(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name='groupby')
@Appender(_doc_template)
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
See also
--------
.ngroup : Number the groups themselves.
"""
self._set_group_selection()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs, ['numeric_only'])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform('cumsum', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform('cummax', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
labels, _, ngroups = self.grouper.group_info
# filled in by Cython
indexer = np.zeros_like(labels)
libgroupby.group_shift_indexer(indexer, labels, ngroups, periods)
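# Descriptive note: after the Cython call, indexer[i] should hold the position
# of the row `periods` steps back within row i's group, or -1 where no such
# row exists, so the take_nd below fills those slots with NaN.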
output = {}
for name, obj in self._iterate_slices():
output[name] = algorithms.take_nd(obj.values, indexer)
return self._wrap_transformed_output(output)
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
GroupBy._add_numeric_operations()
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds
the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True,
mutated=False):
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_iterator(comp_ids,
ngroups,
self.levels,
self.labels)
def apply(self, f, data, axis=0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except Exception:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index)
for ping in self.groupings]
return get_indexer_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = _ensure_platform_int(ids)
out = np.bincount(ids[ids != -1], minlength=ngroup or None)
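# Illustrative example: ids = [0, 0, 1, -1] (with -1 marking rows whose group
# key is missing) and ngroup = 2 gives np.bincount([0, 0, 1], minlength=2),
# i.e. group sizes [2, 1].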
return Series(out,
index=self.result_index,
dtype='int64')
@cache_readonly
def _max_groupsize(self):
"""
Compute size of largest group
"""
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = _ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape,
sort=True, xnull=True)
return compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.labels, np.arange(len(ping.group_index))
@cache_readonly
def ngroups(self):
return len(self.result_index)
@property
def recons_labels(self):
comp_ids, obs_ids, _ = self.group_info
labels = (ping.labels for ping in self.groupings)
return decons_obs_group_ids(comp_ids,
obs_ids, self.shape, labels, xnull=True)
@cache_readonly
def result_index(self):
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].group_index.rename(self.names[0])
return MultiIndex(levels=[ping.group_index for ping in self.groupings],
labels=self.recons_labels,
verify_integrity=False,
names=self.names)
def get_group_levels(self):
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
name_list = []
for ping, labels in zip(self.groupings, self.recons_labels):
labels = _ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'ohlc': 'group_ohlc',
},
'transform': {
'cumprod': 'group_cumprod',
'cumsum': 'group_cumsum',
'cummin': 'group_cummin',
'cummax': 'group_cummax',
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return SelectionMixin._builtin_table.get(arg, arg)
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
f = getattr(libgroupby, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
ftype = self._cython_functions[kind][how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def _cython_operation(self, kind, values, how, axis):
assert kind in ['transform', 'aggregate']
# can we do this operation with our cython functions
# if not raise NotImplementedError
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
# categoricals are only 1d, so we
# are not setup for dim transforming
if is_categorical_dtype(values):
raise NotImplementedError(
"categoricals are not support in cython ops ATM")
elif is_datetime64_any_dtype(values):
if how in ['add', 'prod', 'cumsum', 'cumprod']:
raise NotImplementedError(
"datetime64 type does not support {} "
"operations".format(how))
elif is_timedelta64_dtype(values):
if how in ['prod', 'cumprod']:
raise NotImplementedError(
"timedelta64 type does not support {} "
"operations".format(how))
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_datetimelike = needs_i8_conversion(values.dtype)
is_numeric = is_numeric_dtype(values.dtype)
if is_datetimelike:
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values):
# we use iNaT for the missing value on ints
# so pre-convert to guard this condition
if (values == iNaT).any():
values = _ensure_float64(values)
else:
values = values.astype('int64', copy=False)
elif is_numeric and not is_complex_dtype(values):
values = _ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _ensure_float64(values)
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric,
is_datetimelike)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
fill_value=np.nan)
result = self._transform(
result, values, labels, func, is_numeric, is_datetimelike)
if is_integer_dtype(result):
mask = result == iNaT
if mask.any():
result = result.astype('float64')
result[mask] = np.nan
if kind == 'aggregate' and \
self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
_ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def aggregate(self, values, how, axis=0):
return self._cython_operation('aggregate', values, how, axis)
def transform(self, values, how, axis=0):
return self._cython_operation('transform', values, how, axis)
def _aggregate(self, result, counts, values, comp_ids, agg_func,
is_numeric, is_datetimelike):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def _transform(self, result, values, comp_ids, transform_func,
is_numeric, is_datetimelike):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], values,
comp_ids, is_datetimelike)
else:
transform_func(result, values, comp_ids, is_datetimelike)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
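Examples
--------
A small, purely illustrative call; the arrays below are hypothetical and
must already be sorted.
>>> values = np.array([1, 2, 3, 4, 5, 6])
>>> binner = np.array([0, 3, 6, 9])
>>> generate_bins_generic(values, binner, closed='left')
array([2, 5, 6])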
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values fall before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values fall after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False, mutated=False):
self.bins = _ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice(start, edge), axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start, edge: data[slice(start, edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
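# Illustrative sketch (hypothetical values): with bins == [2, 5] and two
# bin labels, rep == np.diff(np.r_[0, 2, 5]) == [2, 3], so comp_ids ends
# up as [0, 0, 1, 1, 1] -- the first two observations fall in bin 0 and
# the next three in bin 1.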
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = _ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return comp_ids.astype('int64', copy=False), \
obs_group_ids.astype('int64', copy=False), ngroups
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)]
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
# ----------------------------------------------------------------------
# cython aggregation
_cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
in_axis : whether the Grouping is a column in self.obj and hence among
the GroupBy.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
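Examples
--------
A rough, hypothetical sketch (this is an internal class; it is normally
constructed for you by _get_grouper):
>>> ping = Grouping(Index(['x', 'y', 'x']), grouper=np.array([1, 2, 1]))
>>> ping.labels
array([0, 1, 0])
>>> ping.group_index
Int64Index([1, 2], dtype='int64')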
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the level passed in
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = \
index._get_grouper_for_level(self.grouper, level)
else:
if self.grouper is None and self.name is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
self.grouper = self.grouper._codes_for_groupby(self.sort)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(
Categorical.from_codes(np.arange(len(c)),
categories=c,
ordered=self.grouper.ordered))
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper,
(Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, 'dtype', None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping({0})'.format(self.name)
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
values = _ensure_categorical(self.grouper)
return values._reverse_indexer()
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
labels, uniques = algorithms.factorize(
self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
@cache_readonly
def groups(self):
return self.index.groupby(Categorical.from_codes(self.labels,
self.group_index))
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
mutated=False):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed-in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
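A minimal, hypothetical illustration:
>>> df = DataFrame({'A': ['x', 'y', 'x'], 'B': [1, 2, 3]})
>>> grouper, exclusions, obj = _get_grouper(df, key='A')
>>> grouper.names
['A']
>>> exclusions
['A']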
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError('No group keys passed!')
else:
raise ValueError('multiple levels only valid with '
'MultiIndex')
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0 or level < -1:
raise ValueError('level > 0 or level < -1 only valid with '
' MultiIndex')
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns_index = all(g in obj.columns or g in obj.index.names
for g in keys)
else:
all_in_columns_index = False
except Exception:
all_in_columns_index = False
if not any_callable and not all_in_columns_index and \
not any_arraylike and not any_groupers and \
match_axis_length and level is None:
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if gpr in obj.index.names:
warnings.warn(
("'%s' is both a column name and an index level.\n"
"Defaulting to column but "
"this will raise an ambiguity error in a "
"future version") % gpr,
FutureWarning, stacklevel=5)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif gpr in obj.index.names:
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != len(obj):
raise ValueError("Categorical dtype grouper must "
"have len(grouper) == len(data)")
# create the Grouping
# allow us to pass the actual Grouping in as the gpr
ping = Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
in_axis=in_axis) \
if not isinstance(gpr, Grouping) else gpr
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
def _is_label_like(val):
return (isinstance(val, compat.string_types) or
(val is not None and is_scalar(val)))
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError('Grouper and axis must be same length')
return grouper
else:
return grouper
def _whitelist_method_generator(klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,
_series_apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
"""
since we are a Series, we by definition only have
a single name; it may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_doc = dedent("""
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
See also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='Series',
versionadded=''))
def aggregate(self, func_or_funcs, *args, **kwargs):
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
warnings.warn(
("using a dict on a Series for aggregation\n"
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=3)
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if isinstance(list(compat.itervalues(results))[0],
DataFrame):
# let higher level handle
if _level:
return results
return list(compat.itervalues(results))[0]
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
result.name = self._selection_name
return result
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(),
name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
@Substitution(klass='Series', selected='A.')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
func = self._is_cython_func(func) or func
# if string function
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs))
# reg transform
klass = self._selected_obj.__class__
results = []
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
indexer = self._get_index(name)
s = klass(res, indexer)
results.append(s)
from pandas.core.reshape.concat import concat
result = concat(results).sort_index()
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* udfs
# the cython functions take a different path (and handle casting)
dtype = self._selected_obj.dtype
if is_numeric_dtype(dtype):
result = maybe_downcast_to_dtype(result, dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
def _transform_fast(self, func):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = (self.size().fillna(0) > 0).any()
out = algorithms.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notna(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isna = lambda a: a == -1
else:
_isna = isna
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isna(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
if len(ids):
res = out if ids[0] != -1 else out[1:]
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
return Series(res,
index=ri,
name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
self._set_group_selection()
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
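# This largely mirrors Series.value_counts but computes the counts for
# all groups in one pass: observations are sorted by (group id, value),
# run-length boundaries give the per-group value counts, and the result
# is indexed by a MultiIndex of the group keys plus the values (or bins).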
from functools import partial
from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes)
llab = lambda lab, inc: lab[inc]._multiindex.labels[-1]
if is_interval_dtype(lab):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype('float')
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin),
np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how='left')
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = _ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups or None)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, alt=None, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True):
# TODO: the actual managing of mgr_locs is a PITA
# here, it should happen via BlockManager.combine
data, agg_axis = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
new_blocks = []
new_items = []
deleted_items = []
for block in data.blocks:
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
deleted_items.append(locs)
continue
# call our grouper again with only this block
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
result = s.aggregate(lambda x: alt(x, axis=self.axis))
newb = result._data.blocks[0]
finally:
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = block.make_block(result)
new_items.append(locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
new_items = data.items.take(np.sort(indexer))
if len(deleted_items):
# we need to adjust the indexer to account for the
# items we have removed
# really should be done in internals :<
deleted = np.concatenate(deleted_items)
ai = np.arange(len(data))
mask = np.zeros(len(data))
mask[deleted] = 1
indexer = (ai - mask.cumsum())[indexer]
offset = 0
for b in new_blocks:
l = len(b.mgr_locs)
b.mgr_locs = indexer[offset:(offset + l)]
offset += l
return new_items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[arg], _level=_level, _axis=self.axis)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
from pandas.core.tools.numeric import to_numeric
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_non_None_value(values):
try:
v = next(v for v in values if v is not None)
except StopIteration:
return None
return v
v = first_non_None_value(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_non_None_value(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single value
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as it's faster than concat
# and if we have mi-columns
if isinstance(v.index,
MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return Series(values, index=key_index,
name=self._selection_name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):
result = result.apply(
lambda x: to_numeric(x, errors='ignore'))
date_cols = self._selected_obj.select_dtypes(
include=['datetime', 'timedelta']).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = True if any([isinstance(x, Timestamp)
for x in values]) else False
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
return (Series(values, index=key_index)
._convert(datetime=True,
coerce=coerce))
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
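# Generic (slow) transform path: apply `func` group by group, broadcast
# any Series result back to the shape of its group, then concatenate the
# pieces and restore the original row order.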
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = 'transform must return a scalar value for each group'
raise ValueError(msg)
else:
res = path(group)
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = DataFrame(
np.concatenate([res.values] * len(group.index)
).reshape(group.shape),
columns=group.columns, index=group.index)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass='DataFrame', selected='')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj)
def _transform_fast(self, result, obj):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = (self.size().fillna(0) > 0).any()
# for each col, reshape to the size of the original frame
# by take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algorithms.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns,
index=obj.index)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notna(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
Returns
-------
filtered : DataFrame
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):
exec(_def_str)
_block_agg_axis = 1
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
See also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded=''))
def aggregate(self, arg, *args, **kwargs):
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(subset, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _reindex_output(self, result):
"""
If we have categorical groupers, then we want to make sure that
the output is fully reindexed to the group levels; some levels may
not have been observed in the groupings (e.g. they may have all
been NaN groups).
This can re-expand the output space.
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings]):
return result
levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()
if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = [(i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis]
g_nums, g_names = zip(*in_axis_grps)
result = result.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)
return result.reset_index(drop=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
def count(self):
""" Compute count of group, excluding missing values """
from functools import partial
from pandas.core.dtypes.missing import _isna_ndarraylike as isna
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isna(blk.get_values())) for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
# check for rows with the same id but conflicting values
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj,
selection=col,
grouper=self.grouper).nunique(dropna=dropna)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
if not self.as_index:
results.index = _default_index(len(results))
return results
boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
def aggregate(self, arg, *args, **kwargs):
return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError("axis other than 0 is not supported")
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If a
dict is passed, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise ValueError("axis value must be greater than 0")
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
pass
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = _ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
# we are inside a generator, rather than raise StopIteration
# we merely return signal the end
return
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise AbstractMethodError(self)
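# Illustrative sketch (not part of the original module): DataSplitter sorts the
# rows by group label once and then yields contiguous slices, one per group.
# The toy arrays below are assumptions for demonstration; the real class uses
# the Cython helpers (get_group_index_sorter / lib.generate_slices) seen above.
def _example_sort_then_slice():
    import numpy as np
    labels = np.array([1, 0, 1, 0, 2])               # group id per row
    data = np.array([10, 20, 30, 40, 50])
    sort_idx = np.argsort(labels, kind='mergesort')  # stable sort stand-in
    sorted_labels, sorted_data = labels[sort_idx], data[sort_idx]
    starts = np.searchsorted(sorted_labels, np.arange(3), side='left')
    ends = np.searchsorted(sorted_labels, np.arange(3), side='right')
    return [sorted_data[s:e] for s, e in zip(starts, ends)]
    # -> [array([20, 40]), array([10, 30]), array([50])]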
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
| bsd-3-clause |
stuart-knock/bokeh | bokeh/charts/builder/boxplot_builder.py | 41 | 11882 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the BoxPlot class, which lets you build your BoxPlot plots by just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
import pandas as pd
from ..utils import make_scatter, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Rect, Segment
from ...properties import Bool, String
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def BoxPlot(values, marker="circle", outliers=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kw):
""" Create a BoxPlot chart using :class:`BoxPlotBuilder <bokeh.charts.builder.boxplot_builder.BoxPlotBuilder>`
to render the geometry from values, marker and outliers arguments.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
marker (int or string, optional): if outliers=True, the marker type to use
e.g., `circle`.
outliers (bool, optional): Whether or not to plot outliers.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import BoxPlot, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames of arrays are valid inputs)
medals = dict([
('bronze', np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0])),
('silver', np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.])),
('gold', np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.]))
])
boxplot = BoxPlot(medals, marker="circle", outliers=True, title="boxplot",
xlabel="medal type", ylabel="medal count")
output_file('boxplot.html')
show(boxplot)
"""
return create_and_build(
BoxPlotBuilder, values, marker=marker, outliers=outliers,
xscale=xscale, yscale=yscale, xgrid=xgrid, ygrid=ygrid, **kw
)
class BoxPlotBuilder(Builder):
"""This is the BoxPlot class and it is in charge of plotting
scatter plots in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (rects, lines and markers)
taking the references from the source.
"""
# TODO: (bev) should be an enumeration
marker = String(help="""
The marker type to use (e.g., ``circle``) if outliers=True.
""")
outliers = Bool(help="""
Whether to display markers for any outliers.
""")
def _process_data(self):
"""Take the BoxPlot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad, segments and markers glyphs inside the ``_yield_renderers`` method.
Args:
cat (list): categories as a list of strings.
marker (int or string, optional): if outliers=True, the marker type to use
e.g., ``circle``.
outliers (bool, optional): Whether to plot outliers.
values (dict or pd obj): the values to be plotted as bars.
"""
self._data_segment = dict()
self._attr_segment = []
self._data_rect = dict()
self._attr_rect = []
self._data_scatter = dict()
self._attr_scatter = []
self._data_legend = dict()
if isinstance(self._values, pd.DataFrame):
self._groups = self._values.columns
else:
self._groups = list(self._values.keys())
# add group to the self._data_segment dict
self._data_segment["groups"] = self._groups
        # add group and width to the self._data_rect dict
self._data_rect["groups"] = self._groups
self._data_rect["width"] = [0.8] * len(self._groups)
# self._data_scatter does not need references to groups now,
# they will be added later.
# add group to the self._data_legend dict
self._data_legend["groups"] = self._groups
# all the list we are going to use to save calculated values
q0_points = []
q2_points = []
iqr_centers = []
iqr_lengths = []
lower_points = []
upper_points = []
upper_center_boxes = []
upper_height_boxes = []
lower_center_boxes = []
lower_height_boxes = []
out_x, out_y, out_color = ([], [], [])
colors = cycle_colors(self._groups, self.palette)
for i, (level, values) in enumerate(self._values.items()):
# Compute quantiles, center points, heights, IQR, etc.
# quantiles
q = np.percentile(values, [25, 50, 75])
q0_points.append(q[0])
q2_points.append(q[2])
# IQR related stuff...
iqr_centers.append((q[2] + q[0]) / 2)
iqr = q[2] - q[0]
iqr_lengths.append(iqr)
lower = q[0] - 1.5 * iqr
upper = q[2] + 1.5 * iqr
lower_points.append(lower)
upper_points.append(upper)
# rect center points and heights
upper_center_boxes.append((q[2] + q[1]) / 2)
upper_height_boxes.append(q[2] - q[1])
lower_center_boxes.append((q[1] + q[0]) / 2)
lower_height_boxes.append(q[1] - q[0])
# Store indices of outliers as list
outliers = np.where(
(values > upper) | (values < lower)
)[0]
for out in outliers:
o = values[out]
out_x.append(level)
out_y.append(o)
out_color.append(colors[i])
# Store
self.set_and_get(self._data_scatter, self._attr_scatter, "out_x", out_x)
self.set_and_get(self._data_scatter, self._attr_scatter, "out_y", out_y)
self.set_and_get(self._data_scatter, self._attr_scatter, "colors", out_color)
self.set_and_get(self._data_segment, self._attr_segment, "q0", q0_points)
self.set_and_get(self._data_segment, self._attr_segment, "lower", lower_points)
self.set_and_get(self._data_segment, self._attr_segment, "q2", q2_points)
self.set_and_get(self._data_segment, self._attr_segment, "upper", upper_points)
self.set_and_get(self._data_rect, self._attr_rect, "iqr_centers", iqr_centers)
self.set_and_get(self._data_rect, self._attr_rect, "iqr_lengths", iqr_lengths)
self.set_and_get(self._data_rect, self._attr_rect, "upper_center_boxes", upper_center_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "upper_height_boxes", upper_height_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "lower_center_boxes", lower_center_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "lower_height_boxes", lower_height_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "colors", colors)
def _set_sources(self):
"Push the BoxPlot data into the ColumnDataSource and calculate the proper ranges."
self._source_segment = ColumnDataSource(self._data_segment)
self._source_scatter = ColumnDataSource(self._data_scatter)
self._source_rect = ColumnDataSource(self._data_rect)
self._source_legend = ColumnDataSource(self._data_legend)
self.x_range = FactorRange(factors=self._source_segment.data["groups"])
start_y = min(self._data_segment[self._attr_segment[1]])
end_y = max(self._data_segment[self._attr_segment[3]])
## Expand min/max to encompass outliers
if self.outliers and self._data_scatter[self._attr_scatter[1]]:
start_out_y = min(self._data_scatter[self._attr_scatter[1]])
end_out_y = max(self._data_scatter[self._attr_scatter[1]])
            # there may be no outliers on some sides...
start_y = min(start_y, start_out_y)
end_y = max(end_y, end_out_y)
self.y_range = Range1d(start=start_y - 0.1 * (end_y - start_y),
end=end_y + 0.1 * (end_y - start_y))
def _yield_renderers(self):
"""Use the several glyphs to display the Boxplot.
It uses the selected marker glyph to display the points, segments to
display the iqr and rects to display the boxes, taking as reference
points the data loaded at the ColumnDataSurce.
"""
ats = self._attr_segment
glyph = Segment(
x0="groups", y0=ats[1], x1="groups", y1=ats[0],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
glyph = Segment(
x0="groups", y0=ats[2], x1="groups", y1=ats[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
atr = self._attr_rect
glyph = Rect(
x="groups", y=atr[0], width="width", height=atr[1],
line_color="black", line_width=2, fill_color=None,
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
glyph = Rect(
x="groups", y=atr[2], width="width", height=atr[3],
line_color="black", fill_color=atr[6],
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
glyph = Rect(
x="groups", y=atr[4], width="width", height=atr[5],
line_color="black", fill_color=atr[6],
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
if self.outliers:
yield make_scatter(self._source_scatter, self._attr_scatter[0],
self._attr_scatter[1], self.marker,
self._attr_scatter[2])
# Some helper methods
def set_and_get(self, data, attr, val, content):
"""Set a new attr and then get it to fill the self._data dict.
Keep track of the attributes created.
Args:
data (dict): where to store the new attribute content
attr (list): where to store the new attribute names
val (string): name of the new attribute
content (obj): content of the new attribute
"""
self._set_and_get(data, "", attr, val, content)
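# Illustrative sketch (not part of the original module): the per-group
# statistics computed in BoxPlotBuilder._process_data. The sample values are
# an assumption for demonstration only.
def _example_box_stats():
    import numpy as np
    values = np.array([1.0, 2.0, 2.5, 3.0, 3.5, 4.0, 20.0])
    q = np.percentile(values, [25, 50, 75])              # quartiles q0, q1, q2
    iqr = q[2] - q[0]
    lower, upper = q[0] - 1.5 * iqr, q[2] + 1.5 * iqr    # whisker ends
    outliers = values[(values > upper) | (values < lower)]  # here: [20.0]
    # the two stacked rects drawn per group span [q0, q1] and [q1, q2]
    return q, (lower, upper), outliers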
| bsd-3-clause |
jjcc/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def recounstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
        i = np.nonzero(endDates>=date)[0][0] # find first not expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
        dr = first.dr(date) # number of remaining days in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
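# Illustrative sketch (not part of the original script): the roll weights used
# above for a single date. The numbers below are assumptions for demonstration
# only; in the script dr and dt come from the Future objects.
def example_roll_weights():
    dt = 21                      # business days in the roll period
    dr = 14                      # days remaining before the front contract settles
    w1 = 100 * dr / dt           # weight of the front-month future (~66.7)
    w2 = 100 * (dt - dr) / dt    # weight of the second-month future (~33.3)
    p1, p2 = 17.5, 19.0          # hypothetical futures prices
    thirty_day_avg = (p1 * w1 + p2 * w2) / 100
    return w1, w2, thirty_day_avg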
##-------------------Main script---------------------------
if __name__=="__main__":
Y = recounstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
mdeger/nest-simulator | extras/ConnPlotter/colormaps.py | 21 | 6941 | # -*- coding: utf-8 -*-
#
# colormaps.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Colormaps for ConnPlotter.
Provides the following functions and colormaps:
- make_colormap: based on color specification, create colormap
    running from white to fully saturated color
- redblue: from fully saturated red to white to fully saturated blue
- bluered: from fully saturated blue to white to fully saturated red
For all colormaps, "bad" values (NaN) are mapped to white.
Provides also ZeroCenterNorm, mapping negative values to 0..0.5,
positive to 0.5..1.
"""
# ----------------------------------------------------------------------------
import matplotlib.pyplot as plt
import matplotlib.colors as mc
import matplotlib.cbook as cbook
import numpy as np
__all__ = ['ZeroCenterNorm', 'make_colormap', 'redblue', 'bluered',
'bad_color']
# ----------------------------------------------------------------------------
bad_color = (1.0, 1.0, 0.9)
# ----------------------------------------------------------------------------
class ZeroCenterNorm(mc.Normalize):
"""
Normalize so that value 0 is always at 0.5.
Code from matplotlib.colors.Normalize.
Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved
http://matplotlib.sourceforge.net/users/license.html
"""
# ------------------------------------------------------------------------
def __call__(self, value, clip=None):
"""
Normalize given values to [0,1].
Returns data in same form as passed in.
value can be scalar or array.
"""
if clip is not None and clip is not False:
assert (False) # clip not supported
if cbook.iterable(value):
vtype = 'array'
val = np.ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = np.ma.array([value]).astype(np.float)
self.autoscale_None(val)
self.vmin = min(0, self.vmin)
self.vmax = max(0, self.vmax)
# imshow expects masked arrays
# fill entire array with 0.5
result = np.ma.array(0.5 * np.ma.asarray(np.ones(np.shape(val))),
dtype=np.float, mask=val.mask)
# change values != 0
result[val < 0] = 0.5 * (self.vmin - val[val < 0]) / self.vmin
result[val > 0] = 0.5 + 0.5 * val[val > 0] / self.vmax
if vtype == 'scalar':
result = result[0]
return result
# ------------------------------------------------------------------------
def inverse(self, value):
"""
Invert color map. Required by colorbar().
"""
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = np.asarray(value)
res = np.zeros(np.shape(val))
res[val < 0.5] = vmin - 2 * vmin * val[val < 0.5]
res[val > 0.5] = 2 * (val[val > 0.5] - 0.5) * vmax
return res
else:
if value == 0.5:
return 0
elif value < 0.5:
return vmin - 2 * vmin * value # vmin < 0
else:
return 2 * (value - 0.5) * vmax
# ----------------------------------------------------------------------------
def make_colormap(color):
"""
Create LinearSegmentedColormap ranging from white to the given color.
Color can be given in any legal color format. Bad color is set to white.
"""
try:
r, g, b = mc.colorConverter.to_rgb(color)
except:
raise ValueError('Illegal color specification: %s' % color.__repr__)
cm = mc.LinearSegmentedColormap(color.__str__(),
{'red': [(0.0, 1.0, 1.0),
(1.0, r, r)],
'green': [(0.0, 1.0, 1.0),
(1.0, g, g)],
'blue': [(0.0, 1.0, 1.0),
(1.0, b, b)]})
cm.set_bad(color=bad_color) # light yellow
return cm
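# Illustrative sketch (not part of the original module): a colormap built by
# make_colormap runs from white at 0.0 to the fully saturated color at 1.0.
# The choice of 'red' is an assumption for demonstration only.
def _example_make_colormap():
    cm = make_colormap('red')
    white_end = cm(0.0)   # approximately (1.0, 1.0, 1.0, 1.0)
    color_end = cm(1.0)   # approximately (1.0, 0.0, 0.0, 1.0)
    return white_end, color_end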
# ----------------------------------------------------------------------------
redblue = mc.LinearSegmentedColormap('redblue',
{'red': [(0.0, 0.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)]})
redblue.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
bluered = mc.LinearSegmentedColormap('bluered',
{'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 1.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)]})
bluered.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
# this should be proper unit tests
n1 = ZeroCenterNorm()
if (n1([-1, -0.5, 0.0, 0.5, 1.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n1 ok")
else:
print("n1 failed.")
n2 = ZeroCenterNorm(-1, 2)
if (n2([-1, -0.5, 0.0, 1.0, 2.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n2 ok")
else:
print("n2 failed.")
| gpl-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/scipy/signal/_arraytools.py | 28 | 7553 | """
Functions for acting on an axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'.
Parameters
----------
a : numpy.ndarray
The array to be sliced.
start, stop, step : int or None
The slice parameters.
axis : int, optional
The axis of `a` to be sliced.
Examples
--------
>>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> axis_slice(a, start=0, stop=1, axis=1)
array([[1],
[4],
[7]])
>>> axis_slice(a, start=1, axis=0)
array([[4, 5, 6],
[7, 8, 9]])
Notes
-----
The keyword arguments start, stop and step are used by calling
slice(start, stop, step). This implies axis_slice() does not
    handle its arguments exactly the same as indexing. To select
a single index k, for example, use
axis_slice(a, start=k, stop=k+1)
In this case, the length of the axis 'axis' in the result will
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
to remove trivial axes.)
"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[a_slice]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
"""
Odd extension at the boundaries of an array
Generate a new ndarray by making an odd extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import odd_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> odd_ext(a, 2)
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
Odd extension is a "180 degree rotation" at the endpoints of the original
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = odd_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""
Even extension at the boundaries of an array
Generate a new ndarray by making an even extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import even_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> even_ext(a, 2)
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
Even extension is a "mirror image" at the boundaries of the original array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = even_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""
Constant extension at the boundaries of an array
Generate a new ndarray that is a constant extension of `x` along an axis.
The extension repeats the values at the first and last element of
the axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import const_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> const_ext(a, 2)
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
Constant extension continues with the same values as the endpoints of the
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = const_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
"""
Zero padding at the boundaries of an array
Generate a new ndarray that is a zero padded extension of `x` along
an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the
axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import zero_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> zero_ext(a, 2)
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
"""
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
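# Illustrative sketch (not part of the original module): the four extension
# modes side by side on a small 1-d signal. The input array is an assumption
# for demonstration only.
def _example_extensions():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    return {
        'odd': odd_ext(x, 2),      # [-1.,  0.,  1.,  2.,  3.,  4.,  5.,  6.]
        'even': even_ext(x, 2),    # [ 3.,  2.,  1.,  2.,  3.,  4.,  3.,  2.]
        'const': const_ext(x, 2),  # [ 1.,  1.,  1.,  2.,  3.,  4.,  4.,  4.]
        'zero': zero_ext(x, 2),    # [ 0.,  0.,  1.,  2.,  3.,  4.,  0.,  0.]
    }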
| mit |
vigilv/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
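# Illustrative sketch (not part of the original module): what _check_targets
# returns for two common input types. The sample labels are assumptions for
# demonstration only.
def _example_check_targets():
    # binary targets come back as a squeezed pair of 1-d arrays
    y_type, y_true, y_pred = _check_targets([0, 1, 1, 0], [0, 1, 0, 0])
    # y_type == 'binary'
    # multilabel indicator matrices come back as CSR sparse matrices
    t_type, t_true, t_pred = _check_targets(np.array([[0, 1], [1, 1]]),
                                            np.ones((2, 2)))
    # t_type == 'multilabel-indicator'
    return y_type, t_type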
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
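# Illustrative sketch (not part of the original module): the three branches of
# _weighted_sum. The scores and weights are assumptions for demonstration only.
def _example_weighted_sum():
    score = np.array([1.0, 0.0, 1.0])
    weights = np.array([2.0, 1.0, 1.0])
    frac = _weighted_sum(score, weights, normalize=True)    # 0.75, weighted mean
    total = _weighted_sum(score, weights, normalize=False)  # 3.0, weighted count
    plain = _weighted_sum(score, None, normalize=False)     # 2.0, plain count
    return frac, total, plain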
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
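# Illustrative sketch (not part of the original module): Cohen's kappa worked
# out by hand for a small pair of annotations. The label sequences are
# assumptions for demonstration only.
def _example_cohen_kappa():
    y1 = [0, 0, 1, 1]
    y2 = [0, 0, 1, 0]
    # confusion = [[2, 0], [1, 1]], P = confusion / 4
    # p_observed = 0.5 + 0.25 = 0.75
    # p_expected = dot(column sums, row sums) = 0.75 * 0.5 + 0.25 * 0.5 = 0.5
    # kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5
    return cohen_kappa_score(y1, y2)   # 0.5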
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta: float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
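# Illustrative sketch (not part of the original module): _prf_divide turns a
# division by zero into a 0.0 score and an UndefinedMetricWarning. The
# numerator/denominator values are assumptions for demonstration only.
def _example_prf_divide():
    num = np.array([3.0, 0.0])   # e.g. true positives per label
    den = np.array([4.0, 0.0])   # e.g. predicted positives; none for label 2
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = _prf_divide(num, den, 'precision', 'predicted',
                                average=None, warn_for=('precision',))
    return precision   # array([ 0.75,  0.  ]) plus an UndefinedMetricWarning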
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
       <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
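# Illustrative sketch (not part of scikit-learn): the F-beta combination used
# above, written out for a single (precision, recall) pair. The helper name
# `_fbeta_from_precision_recall` is hypothetical and only restates the formula
# from the docstring.
def _fbeta_from_precision_recall(precision, recall, beta=1.0):
    """Weighted harmonic mean of precision and recall (illustrative only)."""
    beta2 = beta ** 2
    if precision == 0.0 and recall == 0.0:
        return 0.0
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)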
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
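# Illustrative sketch (not part of scikit-learn): the binary form of the log
# loss formula quoted in the docstring above, for a single (yt, yp) pair and
# with the same eps clipping. `_binary_log_loss_single` is a hypothetical
# helper.
def _binary_log_loss_single(yt, yp, eps=1e-15):
    yp = np.clip(yp, eps, 1 - eps)
    return -(yt * np.log(yp) + (1 - yt) * np.log(1 - yp))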
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
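# Illustrative sketch (not part of scikit-learn): binary hinge loss with
# labels already encoded as +1/-1, matching the margin description in the
# docstring above. `_binary_hinge_loss` is a hypothetical helper.
def _binary_hinge_loss(y_true_pm1, pred_decision):
    margin = np.asarray(y_true_pm1) * np.asarray(pred_decision)
    losses = np.maximum(0.0, 1.0 - margin)
    return np.mean(losses)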
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
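# Illustrative sketch (not part of scikit-learn): the Brier score above is the
# mean squared difference between binary outcomes and predicted probabilities.
# `_brier_score_dense` is a hypothetical helper.
def _brier_score_dense(y_true01, y_prob):
    y_true01 = np.asarray(y_true01, dtype=float)
    y_prob = np.asarray(y_prob, dtype=float)
    return float(np.mean((y_true01 - y_prob) ** 2))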
| bsd-3-clause |
shoyer/xray | xarray/convert.py | 2 | 9924 | """Functions for converting to and from xarray objects
"""
from collections import Counter, OrderedDict
import numpy as np
import pandas as pd
from .coding.times import CFDatetimeCoder, CFTimedeltaCoder
from .conventions import decode_cf
from .core import duck_array_ops
from .core.dataarray import DataArray
from .core.dtypes import get_fill_value
cdms2_ignored_attrs = {'name', 'tileIndex'}
iris_forbidden_keys = {'standard_name', 'long_name', 'units', 'bounds', 'axis',
'calendar', 'leap_month', 'leap_year', 'month_lengths',
'coordinates', 'grid_mapping', 'climatology',
'cell_methods', 'formula_terms', 'compress',
'missing_value', 'add_offset', 'scale_factor',
'valid_max', 'valid_min', 'valid_range', '_FillValue'}
cell_methods_strings = {'point', 'sum', 'maximum', 'median', 'mid_range',
'minimum', 'mean', 'mode', 'standard_deviation',
'variance'}
def encode(var):
return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable))
def _filter_attrs(attrs, ignored_attrs):
""" Return attrs that are not in ignored_attrs
"""
return dict((k, v) for k, v in attrs.items() if k not in ignored_attrs)
def from_cdms2(variable):
"""Convert a cdms2 variable into an DataArray
"""
values = np.asarray(variable)
name = variable.id
dims = variable.getAxisIds()
coords = {}
for axis in variable.getAxisList():
coords[axis.id] = DataArray(
np.asarray(axis), dims=[axis.id],
attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs))
grid = variable.getGrid()
if grid is not None:
ids = [a.id for a in grid.getAxisList()]
for axis in grid.getLongitude(), grid.getLatitude():
if axis.id not in variable.getAxisIds():
coords[axis.id] = DataArray(
np.asarray(axis[:]), dims=ids,
attrs=_filter_attrs(axis.attributes,
cdms2_ignored_attrs))
attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs)
dataarray = DataArray(values, dims=dims, coords=coords, name=name,
attrs=attrs)
return decode_cf(dataarray.to_dataset())[dataarray.name]
def to_cdms2(dataarray, copy=True):
"""Convert a DataArray into a cdms2 variable
"""
# we don't want cdms2 to be a hard dependency
import cdms2
def set_cdms2_attrs(var, attrs):
for k, v in attrs.items():
setattr(var, k, v)
# 1D axes
axes = []
for dim in dataarray.dims:
coord = encode(dataarray.coords[dim])
axis = cdms2.createAxis(coord.values, id=dim)
set_cdms2_attrs(axis, coord.attrs)
axes.append(axis)
# Data
var = encode(dataarray)
cdms2_var = cdms2.createVariable(var.values, axes=axes, id=dataarray.name,
mask=pd.isnull(var.values), copy=copy)
# Attributes
set_cdms2_attrs(cdms2_var, var.attrs)
# Curvilinear and unstructured grids
if dataarray.name not in dataarray.coords:
cdms2_axes = OrderedDict()
for coord_name in set(dataarray.coords.keys()) - set(dataarray.dims):
coord_array = dataarray.coords[coord_name].to_cdms2()
cdms2_axis_cls = (cdms2.coord.TransientAxis2D
if coord_array.ndim else
cdms2.auxcoord.TransientAuxAxis1D)
cdms2_axis = cdms2_axis_cls(coord_array)
if cdms2_axis.isLongitude():
cdms2_axes['lon'] = cdms2_axis
elif cdms2_axis.isLatitude():
cdms2_axes['lat'] = cdms2_axis
if 'lon' in cdms2_axes and 'lat' in cdms2_axes:
if len(cdms2_axes['lon'].shape) == 2:
cdms2_grid = cdms2.hgrid.TransientCurveGrid(
cdms2_axes['lat'], cdms2_axes['lon'])
else:
cdms2_grid = cdms2.gengrid.AbstractGenericGrid(
cdms2_axes['lat'], cdms2_axes['lon'])
for axis in cdms2_grid.getAxisList():
cdms2_var.setAxis(cdms2_var.getAxisIds().index(axis.id), axis)
cdms2_var.setGrid(cdms2_grid)
return cdms2_var
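# Illustrative sketch (not part of xarray): a minimal round trip through the
# cdms2 converters defined above. The coordinate and variable names are
# assumptions for demonstration, `_cdms2_round_trip_example` is hypothetical,
# and cdms2 must be installed.
def _cdms2_round_trip_example():
    da = DataArray(np.arange(4.0).reshape(2, 2),
                   coords={'lat': [0.0, 1.0], 'lon': [10.0, 20.0]},
                   dims=('lat', 'lon'), name='example')
    cdms2_var = to_cdms2(da)      # xarray DataArray -> cdms2 variable
    return from_cdms2(cdms2_var)  # cdms2 variable -> xarray DataArray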
def _pick_attrs(attrs, keys):
""" Return attrs with keys in keys list
"""
return dict((k, v) for k, v in attrs.items() if k in keys)
def _get_iris_args(attrs):
""" Converts the xarray attrs into args that can be passed into Iris
"""
# iris.unit is deprecated in Iris v1.9
import cf_units
args = {'attributes': _filter_attrs(attrs, iris_forbidden_keys)}
args.update(_pick_attrs(attrs, ('standard_name', 'long_name',)))
unit_args = _pick_attrs(attrs, ('calendar',))
if 'units' in attrs:
args['units'] = cf_units.Unit(attrs['units'], **unit_args)
return args
# TODO: Add converting bounds from xarray to Iris and back
def to_iris(dataarray):
""" Convert a DataArray into a Iris Cube
"""
# Iris not a hard dependency
import iris
from iris.fileformats.netcdf import parse_cell_methods
dim_coords = []
aux_coords = []
for coord_name in dataarray.coords:
coord = encode(dataarray.coords[coord_name])
coord_args = _get_iris_args(coord.attrs)
coord_args['var_name'] = coord_name
axis = None
if coord.dims:
axis = dataarray.get_axis_num(coord.dims)
if coord_name in dataarray.dims:
try:
iris_coord = iris.coords.DimCoord(coord.values, **coord_args)
dim_coords.append((iris_coord, axis))
except ValueError:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
else:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
args = _get_iris_args(dataarray.attrs)
args['var_name'] = dataarray.name
args['dim_coords_and_dims'] = dim_coords
args['aux_coords_and_dims'] = aux_coords
if 'cell_methods' in dataarray.attrs:
args['cell_methods'] = \
parse_cell_methods(dataarray.attrs['cell_methods'])
masked_data = duck_array_ops.masked_invalid(dataarray.data)
cube = iris.cube.Cube(masked_data, **args)
return cube
def _iris_obj_to_attrs(obj):
""" Return a dictionary of attrs when given a Iris object
"""
attrs = {'standard_name': obj.standard_name,
'long_name': obj.long_name}
if obj.units.calendar:
attrs['calendar'] = obj.units.calendar
if obj.units.origin != '1' and not obj.units.is_unknown():
attrs['units'] = obj.units.origin
attrs.update(obj.attributes)
return dict((k, v) for k, v in attrs.items() if v is not None)
def _iris_cell_methods_to_str(cell_methods_obj):
""" Converts a Iris cell methods into a string
"""
cell_methods = []
for cell_method in cell_methods_obj:
names = ''.join(['{}: '.format(n) for n in cell_method.coord_names])
intervals = ' '.join(['interval: {}'.format(interval)
for interval in cell_method.intervals])
comments = ' '.join(['comment: {}'.format(comment)
for comment in cell_method.comments])
extra = ' '.join([intervals, comments]).strip()
if extra:
extra = ' ({})'.format(extra)
cell_methods.append(names + cell_method.method + extra)
return ' '.join(cell_methods)
def _name(iris_obj, default='unknown'):
""" Mimicks `iris_obj.name()` but with different name resolution order.
Similar to iris_obj.name() method, but using iris_obj.var_name first to
enable roundtripping.
"""
return (iris_obj.var_name or iris_obj.standard_name or
iris_obj.long_name or default)
def from_iris(cube):
""" Convert a Iris cube into an DataArray
"""
import iris.exceptions
from xarray.core.pycompat import dask_array_type
name = _name(cube)
if name == 'unknown':
name = None
dims = []
for i in range(cube.ndim):
try:
dim_coord = cube.coord(dim_coords=True, dimensions=(i,))
dims.append(_name(dim_coord))
except iris.exceptions.CoordinateNotFoundError:
dims.append("dim_{}".format(i))
if len(set(dims)) != len(dims):
duplicates = [k for k, v in Counter(dims).items() if v > 1]
raise ValueError('Duplicate coordinate name {}.'.format(duplicates))
coords = OrderedDict()
for coord in cube.coords():
coord_attrs = _iris_obj_to_attrs(coord)
coord_dims = [dims[i] for i in cube.coord_dims(coord)]
if coord_dims:
coords[_name(coord)] = (coord_dims, coord.points, coord_attrs)
else:
coords[_name(coord)] = ((), coord.points.item(), coord_attrs)
array_attrs = _iris_obj_to_attrs(cube)
cell_methods = _iris_cell_methods_to_str(cube.cell_methods)
if cell_methods:
array_attrs['cell_methods'] = cell_methods
# Deal with iris 1.* and 2.*
cube_data = cube.core_data() if hasattr(cube, 'core_data') else cube.data
# Deal with dask and numpy masked arrays
if isinstance(cube_data, dask_array_type):
from dask.array import ma as dask_ma
filled_data = dask_ma.filled(cube_data, get_fill_value(cube.dtype))
elif isinstance(cube_data, np.ma.MaskedArray):
filled_data = np.ma.filled(cube_data, get_fill_value(cube.dtype))
else:
filled_data = cube_data
dataarray = DataArray(filled_data, coords=coords, name=name,
attrs=array_attrs, dims=dims)
decoded_ds = decode_cf(dataarray._to_temp_dataset())
return dataarray._from_temp_dataset(decoded_ds)
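# Illustrative sketch (not part of xarray): a minimal round trip through the
# Iris converters in this module. The names below are assumptions for
# demonstration, `_iris_round_trip_example` is hypothetical, and Iris must be
# installed.
def _iris_round_trip_example():
    da = DataArray(np.arange(6.0).reshape(2, 3),
                   coords={'x': [0, 1], 'y': [10.0, 20.0, 30.0]},
                   dims=('x', 'y'), name='example')
    cube = to_iris(da)        # xarray DataArray -> Iris Cube
    return from_iris(cube)    # Iris Cube -> xarray DataArray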
| apache-2.0 |
zzcclp/spark | python/pyspark/pandas/tests/data_type_ops/test_categorical_ops.py | 6 | 8721 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class CategoricalOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([1, "x", "y"], dtype="category")
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def other_pser(self):
return pd.Series(["y", "x", 1], dtype="category")
@property
def other_psser(self):
return ps.from_pandas(self.other_pser)
def test_add(self):
self.assertRaises(TypeError, lambda: self.psser + "x")
self.assertRaises(TypeError, lambda: self.psser + 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser - psser)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = [1, "x", "y"]
pser = pd.Series(data, dtype="category")
psser = ps.Series(data, dtype="category")
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
self.assert_eq(self.pser.isnull(), self.psser.isnull())
def test_astype(self):
data = [1, 2, 3]
pser = pd.Series(data, dtype="category")
psser = ps.from_pandas(pser)
self.assert_eq(pser.astype(int), psser.astype(int))
self.assert_eq(pser.astype(float), psser.astype(float))
self.assert_eq(pser.astype(np.float32), psser.astype(np.float32))
self.assert_eq(pser.astype(np.int32), psser.astype(np.int32))
self.assert_eq(pser.astype(np.int16), psser.astype(np.int16))
self.assert_eq(pser.astype(np.int8), psser.astype(np.int8))
self.assert_eq(pser.astype(str), psser.astype(str))
self.assert_eq(pser.astype(bool), psser.astype(bool))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=[3, 1, 2])
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
else:
self.assert_eq(pd.Series(data).astype(cat_type), psser.astype(cat_type))
def test_neg(self):
self.assertRaises(TypeError, lambda: -self.psser)
def test_abs(self):
self.assertRaises(TypeError, lambda: abs(self.psser))
def test_invert(self):
self.assertRaises(TypeError, lambda: ~self.psser)
def test_eq(self):
with option_context("compute.ops_on_diff_frames", True):
self.assert_eq(
self.pser == self.other_pser, (self.psser == self.other_psser).sort_index()
)
self.assert_eq(self.pser == self.pser, (self.psser == self.psser).sort_index())
def test_ne(self):
with option_context("compute.ops_on_diff_frames", True):
self.assert_eq(
self.pser != self.other_pser, (self.psser != self.other_psser).sort_index()
)
self.assert_eq(self.pser != self.pser, (self.psser != self.psser).sort_index())
def test_lt(self):
self.assertRaises(NotImplementedError, lambda: self.psser < self.other_psser)
def test_le(self):
self.assertRaises(NotImplementedError, lambda: self.psser <= self.other_psser)
def test_gt(self):
self.assertRaises(NotImplementedError, lambda: self.psser > self.other_psser)
def test_ge(self):
self.assertRaises(NotImplementedError, lambda: self.psser >= self.other_psser)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_categorical_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
KennyCandy/HAR | help/HAR_v4_3.py | 1 | 17410 | # Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG that selects whether this run trains a new model ('train') or restores a previously saved one.
FLAG = 'traina'
N_HIDDEN_CONFIG = 16
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace(' ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
        >>> x = np.arange(4).reshape((2, 2))
        >>> x
        array([[0, 1],
               [2, 3]])
        >>> np.transpose(x)
        array([[0, 2],
               [1, 3]])
        >>> x = np.ones((1, 2, 3))
        >>> np.transpose(x, (1, 0, 2)).shape
        (2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
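    # Worked example (illustrative comment only): load_y below yields 0-based
    # labels such as [[0], [2], [1]]; one_hot flattens them to [0, 2, 1] and
    # indexes np.eye(3), giving [[1, 0, 0], [0, 0, 1], [0, 1, 0]].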
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace(' ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 to each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # 128 time_steps per series
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 3
self.batch_size = 1000
# LSTM structure
self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
        # return a random tensor initialized from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_varibale(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
        # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 16])
b_conv1 = bias_varibale([16])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 16, 1])
b_conv2 = weight_variable([1])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
feature_mat = h_pool2
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
        # New feature_mat's shape: [time_steps, batch_size, n_inputs] = [32, batch_size, 36]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
        # New feature_mat's shape: [time_steps*batch_size, n_inputs] = [32*batch_size, 36]
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
        # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close() | mit |
wwunlp/sner | sner/models/ner.py | 1 | 11822 | """NER Model"""
from sner.classes import Display, Rule, Token
from sner.scripts.ner import contextualfromnames, namesfromrule
from sner.scripts.ner import spellingfromnames, updatetokenstrength
import matplotlib.pyplot as plt
import pandas as pd
import time
def import_corpus(corpus_path, display):
"""
Imports the corpus for the ner model, from a format of our own design.
The format is a CSV containing individual words in the corpus, with columns
containing the Tablet ID, the line number within that tablet, the word
number within that line, the word itself, and any annotation associated
with that word.
Args:
corpus_path (str): Path of the corpus file.
display (Display): Utility object used to print the progress of scanning
the corpus file.
Returns:
corpus (set): Set of Token objects, properly initialized from the input
data.
Raises:
None
"""
corpus = set()
cols = ['Tablet ID', 'Line Number', 'Word Number', 'Word', 'Word Type']
data = pd.read_csv(corpus_path, names=cols, header=0)
for i in range(len(data) - 1):
if data.loc[i, 'Word Number'] > 0:
left_context = data.loc[i - 1, 'Word']
else:
left_context = None
word = data.loc[i, 'Word']
if data.loc[i, 'Line Number'] == data.loc[i + 1, 'Line Number']:
right_context = data.loc[i + 1, 'Word']
else:
right_context = None
word_type = Token.find_type(data.loc[i, 'Word Type'])
token = Token(left_context, word, right_context, word_type)
corpus.add(token)
display.update_progress_bar(i + 1, len(data))
return corpus
def import_seed_rules(seed_rules_path, display):
"""
This function will read the seed rule file, and return the contents as
a set. The file is a CSV formatted with columns containing the rule type,
the contents of the rule, and its strength rating (the probability that a
token is a name if the rule applies to said token).
Args:
rulename (str): Location of seed rules file.
Returns:
rules (set): Set of Rule objects corresponding to the rules in the
seed rules file.
Raises:
None
"""
seed_rules = set()
cols = ['Rule Type', 'Rule', 'Strength']
data = pd.read_csv(seed_rules_path, names=cols, header=0)
for i in range(len(data) - 1):
rule_type = Rule.find_type(data.loc[i, 'Rule Type'])
rule = data.loc[i, 'Rule']
strength = data.loc[i, 'Strength']
rule = Rule(rule_type, rule, strength)
seed_rules.add(rule)
display.update_progress_bar(i + 1, len(data))
return seed_rules
def assess_strength(rules, corpus, config):
"""
Evaluates the accuracy of the strength rating of the passed-in rules. This
is useful because the ner model will generate rules in an unsupervised
fashion. This function gets used to evaluate the performance of that
process.
Args:
rules (set): A set of Rule objects to be evaluated
corpus (set): A set of Token objects, representing the entire Garshana
corpus.
Returns:
None
Raises:
None
"""
bad_rules = 0
bad_context = 0
bad_spelling = 0
total_context = 0
total_spelling = 0
total_delta = 0
est_false_positives = 0
print("rule performance:")
print("calculating...", end='\r')
i = 0
cols = {
'Iteration' : [],
'Rule' : [],
'Type' : [],
'Strength' : [],
'True Strength': [],
'Occurrences' : []
}
output = pd.DataFrame(data=cols)
x_vals = []
y_vals = []
rule_num = 1
for rule in rules:
names = namesfromrule.main(corpus, rule)
real_names = 0
total_names = len(names)
for token in names:
if token.type == Token.Type.personal_name:
real_names += 1
if total_names == 0:
true_strength = 0
else:
true_strength = real_names / total_names
delta = abs(true_strength - rule.strength)
total_delta += delta
x_vals.append(rule_num)
rule_num += 1
y_vals.append(delta)
if rule.type == Rule.Type.spelling:
total_spelling += 1
else:
total_context += 1
#if a rule is more than 20% from its true value, it is 'bad'
if delta > 0.2:
bad_rules += 1
if rule.type == Rule.Type.spelling:
bad_spelling += 1
else:
bad_context += 1
i += 1
output.loc[i, 'Iteration'] = rule.iteration
output.loc[i, 'Rule'] = rule.contents
output.loc[i, 'Type'] = rule.type.name
output.loc[i, 'Strength'] = rule.strength
output.loc[i, 'True Strength'] = true_strength
output.loc[i, 'Occurrences'] = rule.occurrences
output_path = config['path'].format(
'ner',
time.strftime('%Y%m%d_%H%M'),
'output.csv'
)
output.to_csv(path_or_buf=output_path)
print(" ", end='\r')
print("percentage of bad rules: {}%".format(
100 * bad_rules / len(rules)
))
print("percentage of bad context: {}%".format(
100 * bad_context / total_context
))
print("percentage of bad spelling: {}%".format(
100 * bad_spelling / total_spelling
))
print("average delta value: {}%".format(
100 * total_delta / len(rules)
))
plt.xlabel('Rules')
plt.ylabel('Delta')
plt.title('Plot of Delta per Rule')
plt.plot(x_vals, y_vals, 'ro')
plt.axis([min(x_vals), max(x_vals), min(y_vals), max(y_vals)])
plt.show()
sort_y = sorted(y_vals)
plt.xlabel('')
plt.ylabel('Delta')
plt.title('Delta Sorted')
plt.plot(x_vals, sort_y, 'ro')
plt.axis([min(x_vals), max(x_vals), min(sort_y), max(sort_y)])
plt.show()
def get_new_names(corpus, names, rules):
"""
Meant to use the provided ruleset to scan the corpus for new names.
    It will then return the names in question, which will be used
to generate more rules.
Basically, it grabs all tokens from the corpus matching the rules in
question and then return them as a set. The names parameter lets you
specify tokens that are already recognized as names, allowing you to
retrieve only new name results.
Args:
corpus (set): Set of Token objects representing the entire Garshana
corpus.
names (set): Set of Tokens already recognized as names.
rules (set): Set of Rule objects used to find new names
Returns:
new_names (set): Set of Token objects
Raises:
None
"""
new_names = set()
for rule in rules:
results = namesfromrule.main(corpus, rule)
for name in results:
if name not in names:
new_names.add(name)
return new_names
def print_precision_and_recall(selected_elements, relevant_elements, i, log):
"""
Prints the precision, recall, and F1 score of the algorithm. Used by
passing in tokens in the selected_elements parameter. These tokens are
the tokens considered to be names. Relevant elements is just the total
number of names in the corpus.
Args:
selected_elements (set): Set of Token objects representing names as
identified by the algorithm.
relevant_elements (int): Total number of names that exist in the
corpus.
i (int): Index of current log entry.
log (pandas.DataFrame): Data structure containing logs
Returns:
None
Raises:
None
"""
positives = 0.0
true_positives = 0.0
precision = 0.0
recall = 0.0
f1_score = 0.0
for token in selected_elements:
positives += 1.0
if token.type == Token.Type.personal_name:
true_positives += 1.0
if positives == 0.0:
precision = 0.0
else:
precision = true_positives / positives * 100
if relevant_elements == 0.0:
recall = 0.0
else:
recall = true_positives / relevant_elements * 100
if precision + recall == 0.0:
f1_score = 0.0
else:
f1_score = 2.0 * precision * recall / (precision + recall)
print("Precision: {:06.4f}%".format(precision))
print("Recall: {:06.4f}%".format(recall))
print("F1 Score: {:06.4f}\n".format(f1_score))
log.loc[i, 'Precision'] = precision
log.loc[i, 'Recall'] = recall
log.loc[i, 'F1 Score'] = f1_score
def main(config):
"""
Rules and names will be lists of RuleSets or TokenSets.
These sets will represent the results of various iterations of
the algorithm. So index 0 of rules would be the first rule set
(seed rules) and 1 would be the first rules generated and used by the
algorithm itself. Index zero of names would be the names that came from
the seed rules. Index one the rules that came from rule set 1. And so on.
Args:
        config (dict): Dictionary containing configuration information, such as
the location of the input files, as well as various
flags and runtime parameters. (defined in sner.py)
Returns:
None
Raises:
None
"""
path = config['path']
corpus_path = path + config['corpus']
seed_rules_path = path + config['seed-rules']
iterations = config['iterations']
max_rules = config['max_rules']
log_cols = {
'Iteration Type': [],
'New Rules' : [],
'New Names' : [],
'Precision' : [],
'Recall' : [],
'F1 Score' : []
}
log = pd.DataFrame(data=log_cols)
display = Display()
display.start("Importing Corpus: {}".format(corpus_path))
corpus = import_corpus(corpus_path, display)
display.finish()
display.start("Importing Seed Rules: {}".format(seed_rules_path))
seed_rules = import_seed_rules(seed_rules_path, display)
display.finish()
updatetokenstrength.main(corpus, seed_rules)
rule_set = set()
rule_set = rule_set.union(seed_rules)
name_set = set()
new_names = get_new_names(corpus, name_set, seed_rules)
name_set = name_set.union(new_names)
relevant_elements = 0.0
for token in corpus:
if token.type == Token.Type.personal_name:
relevant_elements += 1.0
display.start(
"Starting Model:\n {} iterations, {} max rules".format(
iterations,
max_rules
)
)
for i in range(1, iterations + 1):
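        # Alternate between harvesting contextual rules (even iterations)
        # and spelling rules (odd iterations) from the current name set.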
if i % 2 == 0:
iter_type = 'context'
get_new_rules = contextualfromnames.main
else:
iter_type = 'spelling'
get_new_rules = spellingfromnames.main
print("Iteration {}: find {} rules".format(i, iter_type))
new_rules = get_new_rules(
corpus,
rule_set,
name_set,
max_rules,
i,
config,
display
)
print("Found {} new {} rules".format(len(new_rules), iter_type))
rule_set = rule_set.union(new_rules)
updatetokenstrength.main(corpus, rule_set)
new_names = get_new_names(corpus, name_set, new_rules)
name_set = name_set.union(new_names)
print("top {} rules found {} new names".format(
len(new_rules), len(new_names)))
log.loc[i, 'Iteration Type'] = iter_type
log.loc[i, 'New Rules'] = len(new_rules)
log.loc[i, 'New Names'] = len(new_names)
print_precision_and_recall(name_set, relevant_elements, i, log)
display.finish()
    # Write the run log to disk; the path below mirrors the output-path
    # convention used in assess_strength (assumed, since the original code
    # referenced an undefined `data` object here).
    log_path = config['path'].format(
        'ner',
        time.strftime('%Y%m%d_%H%M'),
        'log.csv'
    )
    log.to_csv(path_or_buf=log_path)
    assess_strength(rule_set, corpus, config)
| mit |
rflamary/POT | examples/plot_gromov_barycenter.py | 4 | 7325 | # -*- coding: utf-8 -*-
"""
=====================================
Gromov-Wasserstein Barycenter example
=====================================
This example is designed to show how to use the Gromov-Wasserstein distance
computation in POT.
"""
# Author: Erwan Vautier <[email protected]>
# Nicolas Courty <[email protected]>
#
# License: MIT License
import numpy as np
import scipy as sp
import scipy.ndimage as spi
import matplotlib.pylab as pl
from sklearn import manifold
from sklearn.decomposition import PCA
import ot
##############################################################################
# Smacof MDS
# ----------
#
# This function finds an embedding of points from a dissimilarity matrix,
# such as the one produced as output by the algorithm
def smacof_mds(C, dim, max_iter=3000, eps=1e-9):
"""
Returns an interpolated point cloud following the dissimilarity matrix C
    using SMACOF multidimensional scaling (MDS) in a target space of the
    specified dimension
Parameters
----------
C : ndarray, shape (ns, ns)
dissimilarity matrix
dim : int
dimension of the targeted space
max_iter : int
Maximum number of iterations of the SMACOF algorithm for a single run
eps : float
relative tolerance w.r.t stress to declare converge
Returns
-------
npos : ndarray, shape (R, dim)
Embedded coordinates of the interpolated point cloud (defined with
one isometry)
"""
rng = np.random.RandomState(seed=3)
mds = manifold.MDS(
dim,
max_iter=max_iter,
eps=1e-9,
dissimilarity='precomputed',
n_init=1)
pos = mds.fit(C).embedding_
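    # Refine the embedding with a second MDS run initialized from the
    # first solution.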
nmds = manifold.MDS(
2,
max_iter=max_iter,
eps=1e-9,
dissimilarity="precomputed",
random_state=rng,
n_init=1)
npos = nmds.fit_transform(C, init=pos)
return npos
##############################################################################
# Data preparation
# ----------------
#
# The four distributions are constructed from 4 simple images
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
square = spi.imread('../data/square.png').astype(np.float64)[:, :, 2] / 256
cross = spi.imread('../data/cross.png').astype(np.float64)[:, :, 2] / 256
triangle = spi.imread('../data/triangle.png').astype(np.float64)[:, :, 2] / 256
star = spi.imread('../data/star.png').astype(np.float64)[:, :, 2] / 256
shapes = [square, cross, triangle, star]
S = 4
xs = [[] for i in range(S)]
for nb in range(4):
for i in range(8):
for j in range(8):
if shapes[nb][i, j] < 0.95:
xs[nb].append([j, 8 - i])
xs = np.array([np.array(xs[0]), np.array(xs[1]),
np.array(xs[2]), np.array(xs[3])])
##############################################################################
# Barycenter computation
# ----------------------
ns = [len(xs[s]) for s in range(S)]
n_samples = 30
"""Compute all distances matrices for the four shapes"""
Cs = [sp.spatial.distance.cdist(xs[s], xs[s]) for s in range(S)]
Cs = [cs / cs.max() for cs in Cs]
ps = [ot.unif(ns[s]) for s in range(S)]
p = ot.unif(n_samples)
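# Interpolation weights between the two endpoint shapes: (1/3, 2/3) and (2/3, 1/3)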
lambdast = [[float(i) / 3, float(3 - i) / 3] for i in [1, 2]]
Ct01 = [0 for i in range(2)]
for i in range(2):
Ct01[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[1]],
[ps[0], ps[1]
], p, lambdast[i], 'square_loss', # 5e-4,
max_iter=100, tol=1e-3)
Ct02 = [0 for i in range(2)]
for i in range(2):
Ct02[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[2]],
[ps[0], ps[2]
], p, lambdast[i], 'square_loss', # 5e-4,
max_iter=100, tol=1e-3)
Ct13 = [0 for i in range(2)]
for i in range(2):
Ct13[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[1], Cs[3]],
[ps[1], ps[3]
], p, lambdast[i], 'square_loss', # 5e-4,
max_iter=100, tol=1e-3)
Ct23 = [0 for i in range(2)]
for i in range(2):
Ct23[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[2], Cs[3]],
[ps[2], ps[3]
], p, lambdast[i], 'square_loss', # 5e-4,
max_iter=100, tol=1e-3)
##############################################################################
# Visualization
# -------------
#
# The PCA helps in getting consistency between the rotations
clf = PCA(n_components=2)
npos = [0, 0, 0, 0]
npos = [smacof_mds(Cs[s], 2) for s in range(S)]
npost01 = [0, 0]
npost01 = [smacof_mds(Ct01[s], 2) for s in range(2)]
npost01 = [clf.fit_transform(npost01[s]) for s in range(2)]
npost02 = [0, 0]
npost02 = [smacof_mds(Ct02[s], 2) for s in range(2)]
npost02 = [clf.fit_transform(npost02[s]) for s in range(2)]
npost13 = [0, 0]
npost13 = [smacof_mds(Ct13[s], 2) for s in range(2)]
npost13 = [clf.fit_transform(npost13[s]) for s in range(2)]
npost23 = [0, 0]
npost23 = [smacof_mds(Ct23[s], 2) for s in range(2)]
npost23 = [clf.fit_transform(npost23[s]) for s in range(2)]
fig = pl.figure(figsize=(10, 10))
ax1 = pl.subplot2grid((4, 4), (0, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax1.scatter(npos[0][:, 0], npos[0][:, 1], color='r')
ax2 = pl.subplot2grid((4, 4), (0, 1))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax2.scatter(npost01[1][:, 0], npost01[1][:, 1], color='b')
ax3 = pl.subplot2grid((4, 4), (0, 2))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax3.scatter(npost01[0][:, 0], npost01[0][:, 1], color='b')
ax4 = pl.subplot2grid((4, 4), (0, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax4.scatter(npos[1][:, 0], npos[1][:, 1], color='r')
ax5 = pl.subplot2grid((4, 4), (1, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax5.scatter(npost02[1][:, 0], npost02[1][:, 1], color='b')
ax6 = pl.subplot2grid((4, 4), (1, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax6.scatter(npost13[1][:, 0], npost13[1][:, 1], color='b')
ax7 = pl.subplot2grid((4, 4), (2, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax7.scatter(npost02[0][:, 0], npost02[0][:, 1], color='b')
ax8 = pl.subplot2grid((4, 4), (2, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax8.scatter(npost13[0][:, 0], npost13[0][:, 1], color='b')
ax9 = pl.subplot2grid((4, 4), (3, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax9.scatter(npos[2][:, 0], npos[2][:, 1], color='r')
ax10 = pl.subplot2grid((4, 4), (3, 1))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax10.scatter(npost23[1][:, 0], npost23[1][:, 1], color='b')
ax11 = pl.subplot2grid((4, 4), (3, 2))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax11.scatter(npost23[0][:, 0], npost23[0][:, 1], color='b')
ax12 = pl.subplot2grid((4, 4), (3, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax12.scatter(npos[3][:, 0], npos[3][:, 1], color='r')
| mit |
Sunhick/ml-tutorials | classification/naive-bayes/nbayes.py | 1 | 1074 | #
# Author: Sunil
# credits: https://rpubs.com/dvorakt/144238
#
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import json
from pprint import pprint
with open('data.json') as df:
data = json.load(df)
emails = list()
types = list()
for d in data:
emails.append(d["message"])
types.append(d["type"])
evec = CountVectorizer()
features = evec.fit_transform(emails)
le = preprocessing.LabelEncoder()
labeler = le.fit(types)
target = labeler.transform(types)
gnb = GaussianNB()
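# CountVectorizer produces a sparse matrix, but GaussianNB only accepts dense
# arrays, hence the .todense() calls when fitting and predicting.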
model = gnb.fit(features.todense(), target.T)
test = ["viagra meet"]
Xpredict = evec.transform(test)
pl = model.predict(Xpredict.todense())
print(le.inverse_transform(pl))
#
# useful links:
# http://stackoverflow.com/questions/19984957/scikit-predict-default-threshold
# http://scikit-learn.org/stable/modules/naive_bayes.html
# https://www.analyticsvidhya.com/blog/2015/09/naive-bayes-explained/
# | mit |
berkeley-stat222/mousestyles | mousestyles/classification/clustering.py | 3 | 7621 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from scipy.cluster.hierarchy import linkage
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn import metrics
import numpy as np
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
from mousestyles.data.utils import day_to_mouse_average
# prep data functions
def prep_data(mouse_data, melted=False, std=True, rescale=True):
"""
    Returns an ndarray of data to be used in clustering algorithms:
column 0 : strain,
column 1: mouse,
other columns corresponding to feature avg/std of a mouse over 16 days
that may or may not be rescaled to the same unit as specified
Parameters
----------
mouse_data:
(i) a 21131 * (4 + ) pandas DataFrame,
column 0 : strain,
column 1: mouse,
column 2: day,
column 3: hour,
other columns corresponding to features
or
(ii) a 1921 * (3 + ) pandas DataFrame,
column 0: strain,
column 1: mouse,
column 2: day,
other columns corresponding to features
melted: bool,
False if the input mouse_data is of type (i)
std: bool,
whether the standard deviation of each feature is returned
rescale: bool,
whether each column is rescaled or not (rescale is performed by the
column's maximum)
Returns
-------
The ndarray as specified
"""
if melted:
mouse_X = np.array(mouse_data.iloc[:, 3:], dtype=float)
else:
mouse_X = np.array(mouse_data.iloc[:, 4:], dtype=float)
mouse_labels = np.array(mouse_data.iloc[:, 0:3])
mouse_dayavg, mouse_daystd = day_to_mouse_average(
mouse_X, mouse_labels, num_strains=16, stdev=True, stderr=False)
mouse_dayavgstd = np.hstack([mouse_dayavg, mouse_daystd[:, 2:]])
mouse_dayavgstd_X = mouse_dayavgstd[:, 2:]
mouse_dayavgstd_X_scl = mouse_dayavgstd_X / np.max(
mouse_dayavgstd_X, axis=0)
mouse_dayavgstd_scl = np.hstack(
[mouse_dayavgstd[:, 0:2], mouse_dayavgstd_X_scl])
if (std is False and rescale is False):
return mouse_dayavg
elif (std is True and rescale is True):
return mouse_dayavgstd
elif (std is False and rescale is True):
return mouse_dayavgstd_scl[:, 0:(mouse_dayavg.shape[1])]
else:
return mouse_dayavgstd_scl
# model fitting functions
def get_optimal_hc_params(mouse_day):
"""
Returns a list of 2: [method, dist]
method: {'ward', 'average', 'complete'}
dist: {'cityblock', 'euclidean', 'chebychev'}
Parameters
----------
mouse_day: a 170 * M numpy array,
column 0 : strain,
column 1: mouse,
other columns corresponding to feature avg/std of a mouse over 16 days
Returns
-------
method_distance: list
[method, dist]
"""
methods = ['ward', 'average', 'complete']
dists = ['cityblock', 'euclidean', 'chebychev']
method_dists = [(methods[i], dists[j]) for i in range(len(methods))
for j in range(len(dists))]
method_dists = [(method, dist) for method, dist in method_dists
if method != 'ward' or dist == 'euclidean']
cs = []
for method, dist in method_dists:
Z = linkage(mouse_day[:, 2:], method=method, metric=dist)
c, coph_dists = cophenet(Z, pdist(mouse_day[:, 2:]))
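        # The cophenetic correlation measures how faithfully the dendrogram
        # preserves the original pairwise distances; larger is better.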
cs.append(c)
# determine the distance method
method, dist = method_dists[np.argmax(cs)]
return [method, dist]
def fit_hc(mouse_day_X, method, dist, num_clusters=range(2, 17)):
"""
Returns a list of 2: [silhouettes, cluster_labels]
silhouettes: list of float,
cluster_labels: list of list,
each sublist is the labels corresponding to the silhouette
Parameters
----------
mouse_day_X: a 170 * M numpy array,
all columns corresponding to feature avg/std of a mouse over 16 days
method: str,
method of calculating distance between clusters
dist: str,
distance metric
num_clusters: range
range of number of clusters
Returns
-------
A list of 2: [silhouettes, cluster_labels]
"""
if (dist == "chebychev"):
dist = "chebyshev"
cluster_labels = []
silhouettes = []
for n_clusters in num_clusters:
clustering = AgglomerativeClustering(
linkage=method, n_clusters=n_clusters)
clustering.fit(mouse_day_X)
labels = clustering.labels_
silhouettes.append(metrics.silhouette_score(
mouse_day_X, labels, metric=dist))
cluster_labels.append(list(labels))
return [silhouettes, cluster_labels]
def get_optimal_fit_kmeans(mouse_X, num_clusters, raw=False):
"""
Returns a list of 2: [silhouettes, cluster_labels]
silhouettes: list of float,
cluster_labels: list of list,
each sublist is the labels corresponding to the silhouette
Parameters
----------
mouse_X: a 170 * M numpy array or 21131 * M numpy array,
all columns corresponding to feature avg/std of a mouse over 16 days
or the raw data without averaging over days
num_clusters: range or a list or a numpy array
range of number of clusters
raw: a boolean with default is False
False if using the 170 * M array
Returns
-------
A list of 2: [silhouettes, cluster_labels]
"""
if raw:
sample_amount = 1000
else:
sample_amount = mouse_X.shape[0]
cluster_labels = []
silhouettes = []
for n_clusters in num_clusters:
clustering = KMeans(n_clusters=n_clusters)
clustering.fit(mouse_X)
labels = clustering.labels_
silhouettes.append(
metrics.silhouette_score(
mouse_X, labels, metric="euclidean",
sample_size=sample_amount))
cluster_labels.append(list(labels))
return [silhouettes, cluster_labels]
def cluster_in_strain(labels_first, labels_second):
"""
Returns a dictionary object indicating the count of different
clusters in each different strain (when put cluster labels as first)
or the count of different strain in each clusters (when put strain
labels as first).
Parameters
----------
    labels_first: numpy array or list
        A numpy array or list of integers representing which cluster
        the mice are in, or representing which strain the mice are in.
    labels_second: numpy array or list
        A numpy array or list of integers (0-15) representing which strain
        the mice are in, or representing which cluster the mice are in
Returns
-------
count_data : dictionary
        A dictionary object whose keys are strain numbers and whose values
        are lists giving the distribution of clusters, or whose keys are
        cluster numbers and whose values are lists giving the distribution
        of each strain.
Examples
--------
>>> count_1 = cluster_in_strain([1,2,1,0,0],[0,1,1,2,1])
"""
count_data = {}
labels_first = np.asarray(labels_first)
labels_second = np.asarray(labels_second)
for label_2 in np.unique(labels_second):
label_2_index = labels_second == label_2
label_1_sub = labels_first[label_2_index]
count_list = []
for label_1 in np.unique(labels_first):
count_list.append(sum(label_1_sub == label_1))
count_data[label_2] = count_list
return count_data
| bsd-2-clause |
imh/gnss-analysis | tests/test_pandas.py | 1 | 3277 | #!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Bhaskar Mookerji <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""Basic integration tests for pandas GPS time interpolation
utilities.
"""
from pandas.tslib import Timestamp, Timedelta
import gnss_analysis.hitl_table_utils as t
import numpy as np
import os
import pandas as pd
import pytest
def test_interpolate_gps_time():
filename = "data/serial-link-20150429-163230.log.json.hdf5"
assert os.path.isfile(filename)
with pd.HDFStore(filename) as store:
idx = store.rover_spp.T.host_offset.reset_index()
model = t.interpolate_gpst_model(idx)
assert isinstance(model, pd.stats.ols.OLS)
assert np.allclose([model.beta.x, model.beta.intercept],
[1.00000368376, -64.2579561376])
init_offset = store.rover_spp.T.host_offset[0]
init_date = store.rover_spp.T.index[0]
f = lambda t1: t.apply_gps_time(t1*t.MSEC_TO_SEC, init_date, model)
dates = store.rover_logs.T.host_offset.apply(f)
l = dates.tolist()
start, end = l[0], l[-1]
assert start == Timestamp("2015-04-29 23:32:55.272075")
assert end == Timestamp("2015-04-29 23:57:46.457568")
init_secs_offset \
= store.rover_spp.T.host_offset[0] - store.rover_logs.T.index[0]
assert np.allclose([init_secs_offset*t.MSEC_TO_SEC], [55.859])
assert (init_date - start) == Timedelta('0 days 00:00:55.848925')
assert (end - init_date) == Timedelta('0 days 00:23:55.336568')
assert pd.DatetimeIndex(dates).is_monotonic_increasing
assert dates.shape == (2457,)
@pytest.mark.slow
def test_gps_time_col():
filename = "data/serial-link-20150429-163230.log.json.hdf5"
assert os.path.isfile(filename)
with pd.HDFStore(filename) as store:
tables = ['rover_iar_state', 'rover_logs', 'rover_tracking']
t.get_gps_time_col(store, tables)
gpst = store.rover_iar_state.T.approx_gps_time
assert gpst.shape == (1487,)
assert pd.DatetimeIndex(gpst).is_monotonic_increasing
gpst = store.rover_logs.T.approx_gps_time
assert gpst.shape == (2457,)
assert pd.DatetimeIndex(gpst).is_monotonic_increasing
gpst = store.rover_tracking[:, 'approx_gps_time', :]
assert gpst.shape == (32, 7248)
def test_gaps():
td = pd.DatetimeIndex(['2015-05-21 21:24:52.200000',
'2015-05-21 21:24:52.400000',
'2015-05-21 21:24:52.600000',
'2015-05-21 21:25:52.800000',
'2015-05-21 21:27:53'],
dtype='datetime64[ns]',
freq=None, tz=None)
assert np.allclose(t.find_largest_gaps(td, 10).values,
[120.2, 60.2, 0.2, 0.2])
assert np.allclose(t.find_largest_gaps(td, 1).values, [120.2])
assert np.allclose(t.find_largest_gaps(td[0:2], 10).values, [0.2])
assert np.allclose(t.find_largest_gaps(td[0:1], 10).values, [0])
| lgpl-3.0 |
hktxt/MachineLearning | voiceprint/Test_EER.py | 1 | 1518 | import numpy as np
import os
import time
import itertools
import random
from numpy import linalg as LA
def ConsinDistance(feaV1, feaV2):
return np.dot(feaV1, feaV2) / (LA.norm(feaV1) * LA.norm(feaV2))
def calculate_eer(y, y_score):
    # y denotes the ground-truth labels,
# y_score denotes the prediction scores.
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from scipy.interpolate import interp1d
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
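    # The equal error rate is the operating point where the false-accept rate
    # equals the false-reject rate (FPR == 1 - TPR); brentq finds the root of
    # 1 - x - TPR(x) on [0, 1] using the interpolated ROC curve.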
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
thresh = interp1d(fpr, thresholds)(eer)
return eer, thresh
import librosa
if __name__ == '__main__':
rcg=np.load('test/rcg9.npy')
reg=np.load('test/reg9.npy')
reg=reg.item()
rcg=rcg.item()
scores=[]
labels=[]
for key0,value0 in rcg.items():
id0=key0.split('/')[0]
feat0=value0
for key1,value1 in reg.items():
id1=key1.split('/')[0]
feat1=value1
score =ConsinDistance(feat0,feat1)
scores.append(score)
label=0
if id1==id0:
label=1
labels.append(label)
labels=np.array(labels)
scores=np.array(scores)
eer, thresh = calculate_eer(labels, scores)
print('Thresh : {}, EER: {}'.format(thresh, eer))
| gpl-3.0 |
datapythonista/pandas | pandas/tests/indexes/datetimes/test_unique.py | 4 | 2206 | from datetime import (
datetime,
timedelta,
)
import pytest
from pandas import (
DatetimeIndex,
NaT,
Timestamp,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"arr, expected",
[
(DatetimeIndex(["2017", "2017"]), DatetimeIndex(["2017"])),
(
DatetimeIndex(["2017", "2017"], tz="US/Eastern"),
DatetimeIndex(["2017"], tz="US/Eastern"),
),
],
)
def test_unique(arr, expected):
result = arr.unique()
tm.assert_index_equal(result, expected)
# GH#21737
# Ensure the underlying data is consistent
assert result[0] == expected[0]
def test_index_unique(rand_series_with_duplicate_datetimeindex):
dups = rand_series_with_duplicate_datetimeindex
index = dups.index
uniques = index.unique()
expected = DatetimeIndex(
[
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
)
assert uniques.dtype == "M8[ns]" # sanity
tm.assert_index_equal(uniques, expected)
assert index.nunique() == 4
# GH#2563
assert isinstance(uniques, DatetimeIndex)
dups_local = index.tz_localize("US/Eastern")
dups_local.name = "foo"
result = dups_local.unique()
expected = DatetimeIndex(expected, name="foo")
expected = expected.tz_localize("US/Eastern")
assert result.tz is not None
assert result.name == "foo"
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [NaT.value]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_is_unique_monotonic(rand_series_with_duplicate_datetimeindex):
index = rand_series_with_duplicate_datetimeindex.index
assert not index.is_unique
| bsd-3-clause |
qiudebo/13learn | code/matplotlib/aqy/aqy_line_chart.py | 1 | 1480 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'qiudebo'
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
labels = (u"1-17岁", u"18-24岁", u"25-30岁", u"31-35岁", u"36-40岁", u"40+岁")
x = np.arange(len(labels))
y = [0.07, 0.15, 0.40, 0.18, 0.11, 0.08]
    width = 0.35  # the width of the bars
fig, ax = plt.subplots()
fig.suptitle(u'我的前半生', fontsize=14, fontweight='bold')
rects1 = ax.plot(x, y, 'b-',label=u'我的前半生')
ax.set_xticks(x)
ax.set_xticklabels(labels)
#ax.plot([0], [0.07], 'o')
ax.plot(x, y, 'o')
for a, b in zip(x, y):
plt.text(a, b + 0.01, '%1.1f%%' % (b * 100), ha='center', va='bottom', fontsize=8, color='green')
plt.ylim(0, 0.45)
    # ax.set_yticks(())  # hide the y-axis ticks
    ax.yaxis.grid(True)  # horizontal grid lines
ax.legend()
    # add a highlighted point and guide lines
ax.scatter(1.5,0.20,s=50,color='r')
ax.plot([1,1],[0.15,0],'k--',lw=2.5)
    # lw sets the line width; 'k--' draws a black dashed line
    # annotation: the text is placed at xytext, offset from the annotated point xy
ax.annotate('annotate', xy=(1.5,0.20), xytext=(2, 0.1),
arrowprops=dict(arrowstyle='->',facecolor='black'))
    # fill the plot area (optional, commented out below)
#ax.fill(x,y,'b')
#plt.fill(x,y,'b')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to display Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # needed to display the minus sign correctly
plt.show()
| mit |
umanoidTyphoon/AMICO-CM_Sketches | epsilonOracle/plot2.py | 1 | 6163 | __author__ = 'Vincenzo Deriu'
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
from matplotlib import rcParams
import sys
ERROR_POSITION_SPITTING_WITH_WHITESPACES = 3
width = 0.35 # the width of the bars
epsilons_dir = "./epsilons"
figures_dir = "./figures"
# error_to_plot = int(sys.argv[1])
epsilons_dict = dict()
error_list = [0, 5, 10, 20]
width_dict = dict()
filename = ''
group_name = ''
n = 0
errors = []
epsilons = []
features = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
57, 58]
# # Prepare data
# for filename in os.listdir(epsilons_dir):
# if filename == ("0-domain_total_downloads.txt"):
# file_path = epsilons_dir + "/" + filename
#
# with open(file_path) as file:
# for line in file:
# splits = line.split(" ")
# # Extract error
# try:
# error = float(splits[ERROR_POSITION_SPITTING_WITH_WHITESPACES])
# int_error = int(error)
# epsilon = float(splits[len(splits) - 1])
# errors.append(int_error)
# epsilons.append(epsilon)
# except ValueError:
# # print "The string given in input is not a float..."
# continue
# break
#
# print(errors)
# print(epsilons)
#
# pos = np.arange(len(errors))
# width = .5 # gives histogram aspect to the bar diagram
#
# ax = plt.axes()
# ax.set_xlabel("Error [%]")
# ax.set_ylabel("epsilon")
# ax.set_xticks(pos + (width / 2))
# ax.set_xticklabels(errors)
#
# plt.bar(pos, epsilons, width, color='#8cd3f1')
# # plt.show()
# plt.savefig(figures_dir + "/fixed_error_" + str(error_to_plot) + "_on_host_total.pdf", format='pdf', bbox_inches='tight')
def sort_list_dir(filelist):
sorted_listdir = []
ordered_filenames = dict()
for filename in filelist:
if filename.endswith(".txt"):
splits = filename.split("-")
ordered_filenames[int(splits[0])] = "-" + splits[1]
feature_ids = list(ordered_filenames)
feature_ids = sorted(feature_ids)
for ID in feature_ids:
feature_name = ordered_filenames.get(ID)
sorted_listdir.append((str(ID) + feature_name))
# print sorted_listdir
return sorted_listdir
def populate_dictionary(sorted_listdir, dictionary, error_to_be_extracted):
found = 0
for filename in sorted_listdir:
feature_id = -1
file_path = epsilons_dir + "/" + filename
# print file_path
with open(file_path) as file:
for line in file:
# print(line)
splits = line.split(" ")
# Extract error
# print splits
try:
error = float(splits[ERROR_POSITION_SPITTING_WITH_WHITESPACES])
int_error = int(error)
last_delimiter_index = file.name.rfind('/')
feature_name = file.name[last_delimiter_index + 1:]
feature_id = int(feature_name.split("-")[0])
# print feature_id
# print "Err: ", error
if int_error == error_to_be_extracted:
# Extract epsilon: it is contains in the last split
epsilon = float(splits[len(splits) - 1])
# print "Epsilon: ", epsilon
#if epsilon != 0.0:
# print epsilon
#error_epsilon.append(int_error)
#error_epsilon.append(epsilon)
#errors = np.append(errors, error)
epsilons_list = dictionary.get(int_error)
if epsilons_list == None:
epsilons_list = [epsilon]
else:
epsilons_list.append(epsilon)
dictionary[int_error] = epsilons_list
# print epsilons_dict, len(epsilons_dict.get(0))
found = 1
break
else:
continue
except ValueError:
# print "The string given in input is not a float..."
continue
epsilons_list = dictionary.get(error_to_be_extracted)
if(found == 0):
if epsilons_list is None:
epsilons_list = [0.0]
dictionary[error_to_be_extracted] = epsilons_list
else:
epsilons_list.append(0.0)
dictionary[error_to_be_extracted] = epsilons_list
# print epsilons_dict, len(epsilons_dict.get(0))
else:
found = 0
# data[feature_id] = error_epsilon
# print epsilons_dict, len(epsilons_dict.get(error_to_be_extracted))
return dictionary
def compute_width_list(list):
res = []
for epsilon in list:
if epsilon == 0.0:
res.append(epsilon)
else:
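            # Count-Min sketch sizing: a width of ceil(e / epsilon) bounds the
            # per-item overestimate by epsilon (relative to the stream's L1 norm).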
width = int(np.ceil(np.exp(1) / epsilon))
res.append(width)
return res
# Compute width for the various sketches
sorted_listdir = sort_list_dir(os.listdir(epsilons_dir))
for error in error_list:
epsilons_dict = populate_dictionary(sorted_listdir, epsilons_dict, error)
# print epsilons_dict
# for error in error_list:
# epsilons_list = epsilons_dict.get(error)
# width_list = compute_width_list(epsilons_list)
# print width_list, len(width_list)
for error in error_list:
epsilons_list = epsilons_dict.get(error)
width_dict[error] = compute_width_list(epsilons_list)
width_list = width_dict.get(20)
pos = np.arange(len(features))
width = 1.0
ax = plt.axes()
ax.set_xlabel("Feature ID")
ax.set_ylabel("width")
ax.set_xticks(pos + (width / 2))
ax.set_xticklabels(sorted(list(features)))
plt.bar(pos, width_list, width, color='#8cd3f1')
plt.show()
| gpl-2.0 |
arrabito/DIRAC | Core/Utilities/Graphs/CurveGraph.py | 5 | 4781 | ########################################################################
# $HeadURL$
########################################################################
""" CurveGraph represents simple line graphs with markers.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import darkenColor, to_timestamp, PrettyDateLocator, \
PrettyDateFormatter, PrettyScalarFormatter
from matplotlib.lines import Line2D
from matplotlib.dates import date2num
import datetime
class CurveGraph( PlotBase ):
"""
The CurveGraph class is a straightforward line graph with markers
"""
def __init__(self,data,ax,prefs,*args,**kw):
PlotBase.__init__(self,data,ax,prefs,*args,**kw)
def draw( self ):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot',0.)]
color = self.prefs.get('plot_color','Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot',color)
else:
labels = [(color,0.)]
tmp_max_y = []
tmp_min_y = []
tmp_x = []
for label,num in labels:
xdata = []
ydata = []
xerror = []
yerror = []
color = self.palette.getColor(label)
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
continue
tmp_x.append( key )
tmp_max_y.append( value + error )
tmp_min_y.append( value - error )
xdata.append( key )
ydata.append( value )
xerror.append( 0. )
yerror.append( error )
linestyle = self.prefs.get( 'linestyle', '-' )
marker = self.prefs.get( 'marker', 'o' )
markersize = self.prefs.get( 'markersize', 8. )
markeredgewidth = self.prefs.get( 'markeredgewidth', 1. )
if not self.prefs.get( 'error_bars', False ):
line = Line2D( xdata, ydata, color=color, linewidth=1., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ) )
self.ax.add_line( line )
else:
self.ax.errorbar( xdata, ydata, color=color, linewidth=2., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ), xerr = xerror, yerr = yerror,
ecolor=color )
ymax = max( tmp_max_y )
ymax *= 1.1
ymin = min( tmp_min_y, 0. )
ymin *= 1.1
if 'log_yaxis' in self.prefs:
ymin = 0.001
xmax=max(tmp_x)*1.1
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get( 'ymin', ymin )
ymax = self.prefs.get( 'ymax', ymax )
xmin = self.prefs.get( 'xmin', xmin )
xmax = self.prefs.get( 'xmax', xmax )
self.ax.set_xlim( xmin=xmin, xmax=xmax )
self.ax.set_ylim( ymin=ymin, ymax=ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i+.5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin=xmin,xmax=len(ticks) )
elif self.gdata.key_type == "time":
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter( )
ax.yaxis.set_major_formatter( sf )
else:
return None
| gpl-3.0 |
mehdidc/scikit-learn | sklearn/tests/test_random_projection.py | 19 | 14015 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
"""Check some statical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
"""Check some statical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
rmarkello/pyls | pyls/base.py | 1 | 31764 | # -*- coding: utf-8 -*-
import gc
import warnings
import numpy as np
from sklearn.utils.validation import check_random_state
from . import compute, structures, utils
def gen_permsamp(groups, n_cond, n_perm, seed=None, verbose=True):
"""
Generates permutation arrays for PLS permutation testing
Parameters
----------
groups : (G,) list
List with number of subjects in each of `G` groups
n_cond : int
Number of conditions, for each subject. Default: 1
n_perm : int
Number of permutations for which to generate resampling arrays
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
verbose : bool, optional
Whether to print status updates as permutations are generated.
Default: True
Returns
-------
permsamp : (S, P) `numpy.ndarray`
Subject permutation arrays, where `S` is the number of subjects and `P`
is the requested number of permutations (i.e., `P = n_perm`)
"""
Y = utils.dummy_code(groups, n_cond)
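    # Y is a dummy-coded indicator matrix with one row per observation
    # (subject x condition) and one column per group/condition combination;
    # it tracks which rows may be exchanged during permutation.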
permsamp = np.zeros(shape=(len(Y), n_perm), dtype=int)
subj_inds = np.arange(np.sum(groups), dtype=int)
rs = check_random_state(seed)
warned = False
# calculate some variables for permuting conditions within subject
# do this here to save on calculation time
indices, grps = np.where(Y)
grp_conds = np.split(indices, np.where(np.diff(grps))[0] + 1)
to_permute = [np.vstack(grp_conds[i:i + n_cond]) for i in
range(0, Y.shape[-1], n_cond)]
splitinds = np.cumsum(groups)[:-1]
check_grps = utils.dummy_code(groups).T.astype(bool)
for i in utils.trange(n_perm, verbose=verbose, desc='Making permutations'):
count, duplicated = 0, True
while duplicated and count < 500:
count, duplicated = count + 1, False
# generate conditions permuted w/i subject
inds = np.hstack([utils.permute_cols(i, seed=rs) for i
in to_permute])
# generate permutation of subjects across groups
perm = rs.permutation(subj_inds)
# confirm subjects *are* mixed across groups
if len(groups) > 1:
for grp in check_grps:
if np.all(np.sort(perm[grp]) == subj_inds[grp]):
duplicated = True
# permute conditions w/i subjects across groups and stack
perminds = np.hstack([f.flatten('F') for f in
np.split(inds[:, perm].T, splitinds)])
# make sure permuted indices are not a duplicate sequence
dupe_seq = perminds[:, None] == permsamp[:, :i]
if dupe_seq.all(axis=0).any():
duplicated = True
# if we broke out because we tried 500 permutations and couldn't
# generate a new one, just warn that we're using duplicate
# permutations and give up
if count == 500 and not warned:
warnings.warn('WARNING: Duplicate permutations used.')
warned = True
# store the permuted indices
permsamp[:, i] = perminds
return permsamp
def gen_bootsamp(groups, n_cond, n_boot, seed=None, verbose=True):
"""
Generates bootstrap arrays for PLS bootstrap resampling
Parameters
----------
groups : (G,) list
List with number of subjects in each of `G` groups
n_cond : int
Number of conditions, for each subject. Default: 1
n_boot : int
Number of boostraps for which to generate resampling arrays
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
verbose : bool, optional
        Whether to print status updates as bootstrap samples are generated.
Default: True
Returns
-------
bootsamp : (S, B) `numpy.ndarray`
Subject bootstrap arrays, where `S` is the number of subjects and `B`
is the requested number of bootstraps (i.e., `B = n_boot`)
"""
Y = utils.dummy_code(groups, n_cond)
bootsamp = np.zeros(shape=(len(Y), n_boot), dtype=int)
subj_inds = np.arange(np.sum(groups), dtype=int)
rs = check_random_state(seed)
warned = False
min_subj = int(np.ceil(Y.sum(axis=0).min() * 0.5))
# calculate some variables for ensuring we resample with replacement
# subjects across all their conditions. do this here to save on
# calculation time
indices, grps = np.where(Y)
grp_conds = np.split(indices, np.where(np.diff(grps))[0] + 1)
inds = np.hstack([np.vstack(grp_conds[i:i + n_cond]) for i
in range(0, len(grp_conds), n_cond)])
splitinds = np.cumsum(groups)[:-1]
check_grps = utils.dummy_code(groups).T.astype(bool)
for i in utils.trange(n_boot, verbose=verbose, desc='Making bootstraps'):
count, duplicated = 0, True
while duplicated and count < 500:
count, duplicated = count + 1, False
# empty container to store current bootstrap attempt
boot = np.zeros(shape=(subj_inds.size), dtype=int)
# iterate through and resample from w/i groups
for grp in check_grps:
curr_grp, all_same = subj_inds[grp], True
while all_same:
num_subj = curr_grp.size
boot[curr_grp] = np.sort(rs.choice(curr_grp,
size=num_subj,
replace=True),
axis=0)
# make sure bootstrap has enough unique subjs
if np.unique(boot[curr_grp]).size >= min_subj:
all_same = False
# resample subjects (with conditions) and stack groups
bootinds = np.hstack([f.flatten('F') for f in
np.split(inds[:, boot].T, splitinds)])
# make sure bootstrap is not a duplicated sequence
for grp in check_grps:
curr_grp = subj_inds[grp]
check = bootinds[curr_grp, None] == bootsamp[curr_grp, :i]
if check.all(axis=0).any():
duplicated = True
# if we broke out because we tried 500 bootstraps and couldn't
# generate a new one, just warn that we're using duplicate
# bootstraps and give up
if count == 500 and not warned:
warnings.warn('WARNING: Duplicate bootstraps used.')
warned = True
# store the bootstrapped indices
bootsamp[:, i] = bootinds
return bootsamp
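# Example (illustrative sketch): bootstrap columns resample subjects *with*
# replacement while keeping each subject's conditions yoked together, and at
# least half of every group must remain unique subjects (``min_subj`` above).
#
#     boots = gen_bootsamp([10, 10], n_cond=2, n_boot=100, seed=1234)
#     boots.shape      # (40, 100)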
def gen_splits(groups, n_cond, n_split, seed=None, test_size=0.5):
"""
Generates splitting arrays for PLS split-half resampling and CV
Parameters
----------
groups : (G,) list
List with number of subjects in each of `G` groups
n_cond : int
Number of conditions, for each subject. Default: 1
n_split : int
Number of splits for which to generate resampling arrays
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
test_size : (0, 1) float, optional
Percent of subjects to include in the split halves. Default: 0.5
Returns
-------
splitsamp : (S, I) `numpy.ndarray`
Subject split arrays, where `S` is the number of subjects and `I`
is the requested number of splits (i.e., `I = n_split`)
"""
Y = utils.dummy_code(groups, n_cond)
splitsamp = np.zeros(shape=(len(Y), n_split), dtype=bool)
subj_inds = np.arange(np.sum(groups), dtype=int)
rs = check_random_state(seed)
warned = False
# calculate some variables for permuting conditions within subject
# do this here to save on calculation time
indices, grps = np.where(Y)
grp_conds = np.split(indices, np.where(np.diff(grps))[0] + 1)
inds = np.hstack([np.vstack(grp_conds[i:i + n_cond]) for i
in range(0, len(grp_conds), n_cond)])
splitinds = np.cumsum(groups)[:-1]
check_grps = utils.dummy_code(groups).T.astype(bool)
for i in range(n_split):
count, duplicated = 0, True
while duplicated and count < 500:
count, duplicated = count + 1, False
            # empty container to store current split half attempt
split = np.zeros(shape=(subj_inds.size), dtype=bool)
# iterate through and split each group separately
for grp in check_grps:
curr_grp = subj_inds[grp]
take = rs.choice([np.ceil, np.floor])
num_subj = int(take(curr_grp.size * (1 - test_size)))
splinds = rs.choice(curr_grp,
size=num_subj,
replace=False)
split[splinds] = True
# split subjects (with conditions) and stack groups
half = np.hstack([f.flatten('F') for f in
np.split(((inds + 1).astype(bool)
* [split[None]]).T,
splitinds)])
# make sure split half is not a duplicated sequence
dupe_seq = half[:, None] == splitsamp[:, :i]
if dupe_seq.all(axis=0).any():
duplicated = True
if count == 500 and not warned:
warnings.warn('WARNING: Duplicate split halves used.')
warned = True
splitsamp[:, i] = half
return splitsamp
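# Example (illustrative sketch): unlike the index arrays returned above, each
# column here is a boolean mask selecting roughly half of the observations,
# split separately within every group.
#
#     halves = gen_splits([10, 10], n_cond=1, n_split=100, seed=1234)
#     halves.dtype         # bool
#     halves[:, 0].sum()   # ~10 observations kept in the first half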
class BasePLS():
"""
Base PLS class to be subclassed
Contains most of the math required for PLS, leaving a few functions for PLS
subclasses to implement. This will not run without those implementations.
Parameters
----------
{input_matrix}
{groups}
{conditions}
**kwargs : optional
Additional key-value pairs; see :obj:`pyls.structures.PLSInputs` for
more info
References
----------
{references}
""".format(**structures._pls_input_docs)
def __init__(self, X, Y=None, groups=None, n_cond=1, **kwargs):
# if groups aren't provided or are provided wrong, fix them
if groups is None:
groups = [len(X) // n_cond]
elif not isinstance(groups, (list, np.ndarray)):
groups = [groups]
# coerce groups to integers
groups = [int(g) for g in groups]
# check that data matrices and groups + n_cond inputs jibe
n_samples = sum([g * n_cond for g in groups])
if len(X) != n_samples:
raise ValueError('Number of samples specified by `groups` and '
'`n_cond` does not match number of samples in '
'input array(s).\n'
' EXPECTED: {}\n'
' ACTUAL: {} (groups: {} * n_cond: {})'
.format(len(X), n_samples, groups, n_cond))
if Y is not None and len(X) != len(Y):
raise ValueError('Provided `X` and `Y` matrices must have the '
'same number of samples. Provided matrices '
'differed: X: {}, Y: {}'.format(len(X), len(Y)))
self.inputs = structures.PLSInputs(X=X, Y=Y, groups=groups,
n_cond=n_cond, **kwargs)
# store dummy-coded array of groups / conditions (save on computation)
self.dummy = utils.dummy_code(groups, n_cond)
self.rs = check_random_state(self.inputs.get('seed'))
# check for parallel processing desire
n_proc = self.inputs.get('n_proc')
if n_proc is not None and n_proc != 1 and not utils.joblib_avail:
self.inputs.n_proc = None
warnings.warn('Setting n_proc > 1 requires the joblib module. '
                          'Consider installing joblib and re-running this '
'if you would like parallelization. Resetting '
'n_proc to 1 for now.')
def gen_covcorr(self, X, Y, groups=None):
"""
Should generate cross-covariance array to be used in `self._svd()`
Must accept the listed parameters and return one array
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
groups : (G,) array_like
Array with number of subjects in each of `G` groups
Returns
-------
crosscov : np.ndarray
Covariance array for decomposition
"""
raise NotImplementedError
    def gen_distrib(self, X, Y, original=None, groups=None):
"""
Should generate behavioral correlations or contrast for bootstrap
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
groups : (S, J) array_like
Dummy coded array, where `S` is observations and `J` corresponds to
the number of different groups x conditions represented in `X` and
`Y`. A value of 1 indicates that an observation belongs to a
specific group or condition
Returns
-------
distrib : (T, L)
Behavioral correlations or contrast for single bootstrap resample
"""
raise NotImplementedError
def run_pls(self, X, Y):
"""
Runs PLS analysis
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
Returns
-------
results : :obj:`pyls.structures.PLSResults`
Results of PLS (not including PLS type-specific outputs)
"""
        # initialize results structure
self.res = res = structures.PLSResults(inputs=self.inputs)
# get original singular vectors / values
res['x_weights'], res['singvals'], res['y_weights'] = \
self.svd(X, Y, seed=self.rs)
res['x_scores'] = X @ res['x_weights']
if self.inputs.n_perm > 0:
# compute permutations and get statistical significance of LVs
d_perm, ucorrs, vcorrs = self.permutation(X, Y, seed=self.rs)
res['permres']['pvals'] = compute.perm_sig(res['singvals'], d_perm)
res['permres']['permsamples'] = self.permsamp
if self.inputs.n_split is not None:
# get ucorr / vcorr (via split half resampling) for original,
# unpermuted `X` and `Y` arrays
di = np.linalg.inv(res['singvals'])
orig_ucorr, orig_vcorr = self.split_half(X, Y,
res['x_weights'] @ di,
res['y_weights'] @ di,
seed=self.rs)
# get p-values for ucorr/vcorr
ucorr_prob = compute.perm_sig(np.diag(orig_ucorr), ucorrs)
vcorr_prob = compute.perm_sig(np.diag(orig_vcorr), vcorrs)
# get confidence intervals for ucorr/vcorr
ucorr_ll, ucorr_ul = compute.boot_ci(ucorrs, ci=self.inputs.ci)
vcorr_ll, vcorr_ul = compute.boot_ci(vcorrs, ci=self.inputs.ci)
# update results object with split-half resampling results
res['splitres'].update(dict(ucorr=orig_ucorr,
vcorr=orig_vcorr,
ucorr_pvals=ucorr_prob,
vcorr_pvals=vcorr_prob,
ucorr_lolim=ucorr_ll,
vcorr_lolim=vcorr_ll,
ucorr_uplim=ucorr_ul,
vcorr_uplim=vcorr_ul))
return res
def svd(self, X, Y, groups=None, seed=None):
"""
Runs SVD on cross-covariance matrix computed from `X` and `Y`
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
groups : (S, J) array_like
Dummy coded array, where `S` is observations and `J` corresponds to
the number of different groups x conditions represented in `X` and
`Y`. A value of 1 indicates that an observation belongs to a
specific group or condition
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
Returns
-------
U : (B, L) `numpy.ndarray`
Left singular vectors from singular value decomposition
d : (L, L) `numpy.ndarray`
Diagonal array of singular values from singular value decomposition
V : (J, L) `numpy.ndarray`
Right singular vectors from singular value decomposition
"""
# make dummy-coded grouping array if not provided
if groups is None:
groups = utils.dummy_code(self.inputs.groups, self.inputs.n_cond)
# generate cross-covariance matrix and determine # of components
crosscov = self.gen_covcorr(X, Y, groups=groups)
U, d, V = compute.svd(crosscov, seed=seed)
return U, d, V
def bootstrap(self, X, Y, seed=None):
"""
Bootstraps `X` and `Y` (w/replacement) and recomputes SVD
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
Returns
-------
distrib : (T, L) numpy.ndarray
Either behavioral correlations or group x condition contrast;
depends on PLS type
u_sum : (B, L) numpy.ndarray
Sum of the left singular vectors across all bootstraps
u_square : (B, L) numpy.ndarray
Sum of the squared left singular vectors across all bootstraps
"""
# generate bootstrap resampled indices (unless already provided)
self.bootsamp = self.inputs.get('bootsamples', None)
if self.bootsamp is None:
self.bootsamp = gen_bootsamp(self.inputs.groups,
self.inputs.n_cond,
self.inputs.n_boot,
seed=seed,
verbose=self.inputs.verbose)
# make empty arrays to store bootstrapped singular vectors
# these will be used to calculate the standard error later on for
# creation of bootstrap ratios
u_sum = np.zeros_like(self.res['x_weights'])
u_square = np.zeros_like(self.res['x_weights'])
# `distrib` corresponds either to the behavioral correlations (if
# running a behavioral PLS) or to the group/condition contrast (if
# running a mean-centered PLS); we'll just extend it and then stack
# all the individual matrices together later (they're quite small so we
# don't need to be too worried about memory usage, here)
distrib = []
# determine the number of bootstraps we'll run each iteration
iters = 1 if self.inputs.n_proc is None else self.inputs.n_proc
gen = utils.trange(self.inputs.n_boot, verbose=self.inputs.verbose,
desc='Running bootstraps')
with utils.get_par_func(self.inputs.n_proc,
self.__class__._single_boot) as (par, func):
boots = 0
while boots < self.inputs.n_boot:
# determine number of bootstraps to run this round
# we don't want to overshoot the requested number, so make
                # sure to cut it off if that's what would happen
top = boots + iters
if top >= self.inputs.n_boot:
top = self.inputs.n_boot
# run the bootstraps
d, usu = zip(*par(func(self, X=X, Y=Y,
inds=self.bootsamp[..., i],
groups=self.dummy,
original=self.res['x_weights'],
seed=i)
for i in range(boots, top)))
# sum bootstrapped singular vectors and store
u_sum += np.sum(usu, axis=0)
u_square += np.sum(np.square(usu), axis=0)
distrib.extend(d)
# force garbage collection
# this is only really needed when parallelizing bootstraps
# the `usu` variable can get REALLY GIANT if either `X` or `Y`
# is large and `n_proc` is > 1, so we really don't want to keep
# it around for any longer than absolutely necessary
if self.inputs.n_proc is not None:
del usu
gc.collect()
# update progress bar and # of bootstraps already run
gen.update(top - boots)
boots = top
gen.close()
return np.stack(distrib, axis=-1), u_sum, u_square
def _single_boot(self, X, Y, inds, groups=None, original=None, seed=None):
"""
Bootstraps `X` and `Y` (w/replacement) and recomputes SVD
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
groups : (S, J) array_like
Dummy coded input array, where `S` is observations and `J`
corresponds to the number of different groups x conditions. A value
of 1 indicates that an observation belongs to a specific group or
condition.
original : (B, L) array_like
Left singular vector from original decomposition of `X` and `Y`.
Used to perform Procrustes rotation on permuted singular vectors
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
Returns
-------
distrib : np.ndarray
Either behavioral correlations or contrast, depending on PLS type;
generated with self.gen_distrib() which should be specified by the
PLS subclass
        U_boot : (B, L) array_like
Left singular vectors from decomposition of bootstrap resampled `X`
and `Y`
"""
# make sure we have original (non-bootstrapped) singular vectors
# these are required for the procrustes rotation to ensure our
# singular vectors are all in the same orientation
if original is None:
original = self.svd(X, Y, groups=groups, seed=seed)[0]
# perform SVD of bootstrapped arrays and rotate left singular vectors
U, d = self.svd(X[inds], Y[inds], groups=groups, seed=seed)[:-1]
U_boot = compute.procrustes(original, U, d)
# get contrast / behavcorrs (this function should be specified by the
# subclass)
distrib = self.gen_distrib(X[inds], Y[inds], original, groups)
return distrib, U_boot
def make_permutation(self, X, Y, perminds):
"""
Permutes `Y` according to `perminds`, leaving `X` un-permuted
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
perminds : (S,) array_like
Array by which to permute `Y`
Returns
-------
Xp : (S, B) array_like
Identical to `X`
Yp : (S, T) array_like
`Y`, permuted according to `perminds`
"""
return X, Y[perminds]
def permutation(self, X, Y, seed=None):
"""
Permutes `X` (w/o replacement) and recomputes SVD
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
Returns
-------
d_perm : (L, P) `numpy.ndarray`
Permuted singular values, where `L` is the number of singular
values and `P` is the number of permutations
ucorrs : (L, P) `numpy.ndarray`
Split-half correlations of left singular values. Only set if
`self.inputs.n_split != 0`
vcorrs : (L, P) `numpy.ndarray`
Split-half correlations of right singular values. Only set if
`self.inputs.n_split != 0`
"""
# generate permuted indices (unless already provided)
self.permsamp = self.inputs.get('permsamples')
if self.permsamp is None:
self.permsamp = gen_permsamp(self.inputs.groups,
self.inputs.n_cond,
self.inputs.n_perm,
seed=seed,
verbose=self.inputs.verbose)
# get permuted values (parallelizing as requested)
gen = utils.trange(self.inputs.n_perm, verbose=self.inputs.verbose,
desc='Running permutations')
with utils.get_par_func(self.inputs.n_proc,
self.__class__._single_perm) as (par, func):
out = par(func(self, X=X, Y=Y, inds=self.permsamp[:, i],
groups=self.dummy, original=self.res['y_weights'],
seed=i)
for i in gen)
d_perm, ucorrs, vcorrs = [np.stack(o, axis=-1) for o in zip(*out)]
return d_perm, ucorrs, vcorrs
def _single_perm(self, X, Y, inds, groups=None, original=None, seed=None):
"""
Permutes `X` (w/o replacement) and recomputes SVD
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
inds : (S,) array_like
Permutation resampling array
original : (J, L) array_like
Right singular vector from original decomposition of `X` and `Y`.
Used to perform Procrustes rotation on permuted singular values,
if desired
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
Returns
-------
d_perm : (L,) `numpy.ndarray`
Permuted singular values, where `L` is the number of singular
values
ucorrs : (L,) `numpy.ndarray`
Split-half correlations of left singular values. Only set if
`self.inputs.n_split != 0`
vcorrs : (L,) `numpy.ndarray`
Split-half correlations of right singular values. Only set if
`self.inputs.n_split != 0`
"""
# calculate SVD of permuted matrices
Xp, Yp = self.make_permutation(X, Y, inds)
U, d, V = self.svd(Xp, Yp, groups=groups, seed=seed)
# optionally get rotated/rescaled singular values
if self.inputs.rotate:
if original is None:
original = self.svd(X, Y, groups=groups, seed=seed)[-1]
ssd = np.sqrt(np.sum(compute.procrustes(original, V, d)**2,
axis=0))
else:
ssd = np.diag(d)
# get ucorr/vcorr if split-half resampling requested
if self.inputs.n_split is not None:
di = np.linalg.inv(d)
ucorr, vcorr = self.split_half(Xp, Yp, U @ di, V @ di,
groups=groups, seed=seed)
else:
ucorr, vcorr = None, None
return ssd, ucorr, vcorr
def split_half(self, X, Y, ud=None, vd=None, groups=None, seed=None):
"""
Parameters
----------
X : (S, B) array_like
Input data matrix, where `S` is observations and `B` is features
Y : (S, T) array_like
Input data matrix, where `S` is observations and `T` is features
ud : (B, L) array_like
Left singular vectors, scaled by singular values
vd : (J, L) array_like
Right singular vectors, scaled by singular values
seed : {int, :obj:`numpy.random.RandomState`, None}, optional
Seed for random number generation. Default: None
Returns
-------
ucorr : (L,) `numpy.ndarray`
Average correlation of left singular vectors across split-halves
vcorr : (L,) `numpy.ndarray`
Average correlation of right singular vectors across split-halves
"""
# generate splits
splitsamp = gen_splits(self.inputs.groups,
self.inputs.n_cond,
self.inputs.n_split,
seed=seed,
test_size=0.5).astype(bool)
# make dummy-coded grouping array if not provided
if groups is None:
groups = utils.dummy_code(self.inputs.groups, self.inputs.n_cond)
# generate original singular vectors if not provided
if ud is None or vd is None:
U, d, V = self.svd(X, Y, groups=groups, seed=seed)
di = np.linalg.inv(d)
ud, vd = U @ di, V @ di
# empty arrays to hold split-half correlations
ucorr = np.zeros(shape=(ud.shape[-1], self.inputs.n_split))
vcorr = np.zeros(shape=(vd.shape[-1], self.inputs.n_split))
for i in range(self.inputs.n_split):
# calculate cross-covariance matrix for both splits
spl = splitsamp[:, i]
D1 = self.gen_covcorr(X[spl], Y[spl], groups=groups[spl])
D2 = self.gen_covcorr(X[~spl], Y[~spl], groups=groups[~spl])
# project cross-covariance matrices onto original SVD to obtain
# left & right singular vector and correlate between split halves
ucorr[:, i] = compute.efficient_corr(D1.T @ vd, D2.T @ vd)
vcorr[:, i] = compute.efficient_corr(D1 @ ud, D2 @ ud)
# return average correlations for singular vectors across `n_split`
return np.mean(ucorr, axis=-1), np.mean(vcorr, axis=-1)
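# Minimal subclassing sketch (hypothetical, not part of this module): a
# concrete PLS variant only needs to supply the cross-covariance matrix fed
# to the SVD (`gen_covcorr`) and the per-resample distribution used during
# bootstrapping (`gen_distrib`); everything else is inherited from BasePLS.
# The cross-covariance below is just one plausible choice.
#
#     class SimplePLS(BasePLS):
#         def gen_covcorr(self, X, Y, groups=None):
#             Xc = X - X.mean(axis=0)
#             Yc = Y - Y.mean(axis=0)
#             return Yc.T @ Xc            # (T, B) cross-covariance
#
#         def gen_distrib(self, X, Y, original=None, groups=None):
#             # one plausible stand-in: project the resampled data onto the
#             # original left singular vectors
#             return Y.T @ (X @ original)
#
#     results = SimplePLS(X, Y, n_perm=100, n_boot=100).run_pls(X, Y)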
| gpl-2.0 |
pompiduskus/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
robin-lai/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
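# Worked example (illustrative): a sample at x = 1.3 with unit spacing is
# split between bins 1 and 2 with linear-interpolation weights 0.7 and 0.3:
#
#     _weights(np.array([1.3]))   # -> (array([1., 2.]), array([0.7, 0.3]))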
def _generate_center_coordinates(l_x):
    X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
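# Illustrative usage (a sketch, not part of the original example): the
# operator maps the flattened image onto the stacked detector bins, so the
# documented shape (n_dir * l_x, l_x ** 2) can be sanity-checked on a small
# image:
#
#     op = build_projection_operator(8, 4)
#     op.shape        # expected to be (32, 64)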
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
    n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
SnippyHolloW/speech_embeddings | gmm.py | 2 | 2745 | """Trains a GMM on formant data (e.g. from the Hillenbrand corpus).
"""
import numpy as np
import pylab as pl
from sklearn.mixture import GMM
from sklearn import metrics
from collections import defaultdict
def parse(fname):
with open(fname) as f:
d = map(lambda l: l.rstrip('\n').split(), f.readlines())
header = d[0]
d = filter(lambda x: not 'NaN' in x, d)
return header, np.array(d[1:])
def eval_clusters(y_pred, y, X, gmm):
# 2D distance based 1-to-1 matching between y_pred and y
sety = set(y)
mapsety = dict(zip(xrange(len(sety)), sety))
assert(len(set(y_pred)) == len(sety))
maps_to = {}
d_m = np.ndarray((len(sety), len(sety)))
for i, phone in enumerate(sety): # i, phone: ahah!
for ncomponent in xrange(gmm.means_.shape[0]):
d_m[i,ncomponent] = np.linalg.norm(gmm.means_[ncomponent]
- np.mean(X[y==phone]))
for _ in xrange(d_m.shape[0]):
indices = np.unravel_index(d_m.argmin(), d_m.shape)
while mapsety[indices[0]] in maps_to or indices[1] in maps_to.values():
d_m[indices[0],indices[1]] = np.finfo('d').max
indices = np.unravel_index(d_m.argmin(), d_m.shape)
maps_to[mapsety[indices[0]]] = indices[1]
d_m[indices[0],indices[1]] = np.finfo('d').max
print maps_to
y_gold = np.array(map(lambda x: maps_to[x], y))
print "Adjusted rand scores:",
print metrics.adjusted_rand_score(y_gold, y_pred)
print "Homogeneity:",
print metrics.homogeneity_score(y_gold, y_pred)
print "Completeness:",
print metrics.completeness_score(y_gold, y_pred)
print "V-measure:",
print metrics.v_measure_score(y_gold, y_pred)
return y_pred, y_gold, maps_to
if __name__ == "__main__":
h, d = parse('data/formants.dat')
X = d[:, 3:5].astype(np.float)
y = d[:, 2]
sety = set(y)
print "All the", len(sety), "vowels:", sety
gmm = GMM(n_components=len(sety)) # default covar='diag'
gmm.fit(X)
y_pred, y_gold, maps_to = eval_clusters(gmm.predict(X), y, X, gmm)
#pl.scatter(X[:,1], X[:,0], s=20, c=y_gold)
import matplotlib.cm as cm
colors = cm.rainbow(np.linspace(0, 1, 2*len(sety)))
ax = pl.subplot(2, 1, 1)
for i, phone in enumerate(sety): # oups, I did it again
pl.scatter(X[y==phone,1], X[y==phone,0], s=20,
c=colors[2*i], label=phone)
pl.legend(bbox_to_anchor=(0., 1.02, 1., 1.102), loc=8,
ncol=len(sety)/2, mode="expand", borderaxespad=0.)
ax = pl.subplot(2, 1, 2)
for i, phone in enumerate(set(y_pred)): # oups, I did it again
pl.scatter(X[y_pred==phone,1], X[y_pred==phone,0], s=20,
c=colors[2*i], label=phone)
pl.show()
| mit |
nikitasingh981/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 22 | 20592 | from scipy import sparse
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
y = rng.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
# there is a 1e-9 chance it will take these many trials. No good reason
# 1e-2 isn't enough, can still happen
    # 2 is what RANSAC defines as min_samples = X.shape[1] + 1
max_trials = _dynamic_max_trials(
len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2)
for i in range(50):
ransac_estimator.set_params(min_samples=2, random_state=i)
ransac_estimator.fit(X, y)
assert_less(ransac_estimator.n_trials_, max_trials + 1)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 5)
assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_no_valid_data():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 5)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_no_valid_model():
def is_model_valid(estimator, X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_model_valid=is_model_valid,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
assert_equal(ransac_estimator.n_skips_invalid_model_, 5)
def test_ransac_exceed_max_skips():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_trials=5,
max_skips=3)
msg = ("RANSAC skipped more iterations than `max_skips`")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_warn_exceed_max_skips():
global cause_skip
cause_skip = False
def is_data_valid(X, y):
global cause_skip
if not cause_skip:
cause_skip = True
return True
else:
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_skips=3,
max_trials=5)
assert_warns(UserWarning, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
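# The hand-checked values above follow the usual RANSAC bound on the number
# of trials (a sketch of the formula, not the library code itself):
#
#     n_trials >= log(1 - stop_probability) / log(1 - inlier_ratio ** min_samples)
#
# e.g. 30% outliers with min_samples=2 gives log(0.01) / log(1 - 0.7 ** 2)
# ~= 6.9, which rounds up to the 7 asserted above.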
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause |
xyguo/scikit-learn | sklearn/gaussian_process/gpc.py | 42 | 31571 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
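# Illustrative check (a sketch, not used anywhere in this module): evaluated
# point-wise, the five-term approximation of the logistic sigmoid reads
#
#     approx = (COEFS * (erf(LAMBDAS * f) + 1) / 2).sum(axis=0)
#
# for an array ``f`` of latent values, and closely tracks
# ``1. / (1. + np.exp(-f))`` over the range relevant for ``predict_proba``.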
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
        K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
        f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Algorithm 3.2, Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
        K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
        # Approximate \int sigma(z) * N(z | f_star, var_f_star) dz, where
        # sigma denotes the logistic sigmoid.
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
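        # (Added sketch, not part of the original comments; here l stands for
        # an entry of LAMBDAS and s2 for the predictive variance var_f_star.)
        # With the sigmoid approximated as
        #     sigma(z) ~= sum_i COEFS[i] * (1 + erf(LAMBDAS[i] * z)) / 2,
        # each term integrates analytically against the Gaussian:
        #     \int erf(l * z) N(z | f, s2) dz = erf(l * f / sqrt(1 + 2 * l**2 * s2)),
        # which is what the alpha/gamma/integrals expressions below evaluate.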
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
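        # (Added sketch, not part of the original comments.)  The mode being
        # sought maximizes the unnormalized log posterior
        #     Psi(f) = log p(y | f) - 0.5 * f^T K^{-1} f + const,
        # and each Newton step below works through the Cholesky factor of
        #     B = I + W^{1/2} K W^{1/2},
        # which is the numerically stable formulation used in GPML.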
        # If warm_start is enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
    function. For multi-class classification, several binary one-versus-rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be minimized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
        By default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
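    Examples
    --------
    A minimal usage sketch (illustrative only; it assumes the bundled iris
    dataset, default settings, and omits the resulting values):
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.gaussian_process import GaussianProcessClassifier
        >>> from sklearn.gaussian_process.kernels import RBF
        >>> iris = load_iris()
        >>> gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0))
        >>> gpc = gpc.fit(iris.data, iris.target)
        >>> labels = gpc.predict(iris.data[:2])
        >>> probabilities = gpc.predict_proba(iris.data[:2])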
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
        theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
| bsd-3-clause |
NelisVerhoef/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute expose an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
jseabold/statsmodels | statsmodels/base/distributed_estimation.py | 5 | 22186 | from statsmodels.base.elastic_net import RegularizedResults
from statsmodels.stats.regularized_covariance import _calc_nodewise_row, \
_calc_nodewise_weight, _calc_approx_inv_cov
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.regression.linear_model import OLS
import numpy as np
"""
Distributed estimation routines. Currently, we support several
methods of distribution
- sequential, has no extra dependencies
- parallel
- with joblib
A variety of backends are supported through joblib
This allows for different types of clusters besides
standard local clusters. Some examples of
backends supported by joblib are
- dask.distributed
- yarn
- ipyparallel
The framework is very general and allows for a variety of
estimation methods. Currently, these include
- debiased regularized estimation
- simple coefficient averaging (naive)
- regularized
- unregularized
Currently, the default is regularized estimation with debiasing
which follows the methods outlined in
Jason D. Lee, Qiang Liu, Yuekai Sun and Jonathan E. Taylor.
"Communication-Efficient Sparse Regression: A One-Shot Approach."
arXiv:1503.04337. 2015. https://arxiv.org/abs/1503.04337.
There are several variables that are taken from the source paper
for which the interpretation may not be directly clear from the
code, these are mostly used to help form the estimate of the
approximate inverse covariance matrix as part of the
debiasing procedure.
wexog
A weighted design matrix used to perform the node-wise
regression procedure.
nodewise_row
nodewise_row is produced as part of the node-wise regression
procedure used to produce the approximate inverse covariance
matrix. One is produced for each variable using the
LASSO.
nodewise_weight
    nodewise_weight is produced from the nodewise_row values for
    each variable and gives the weights used to rescale them; the
    rescaled values are ultimately used to form approx_inv_cov.
approx_inv_cov
This is the estimate of the approximate inverse covariance
    matrix. This is used to debias the coefficient average
along with the average gradient. For the OLS case,
approx_inv_cov is an approximation for
n * (X^T X)^{-1}
formed by node-wise regression.
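As a sketch of how these pieces fit together, the debiasing applied after
averaging is the one-line correction implemented in _join_debiased below
(params_mn and grad_mn are the across-partition averages; that function
also handles the signs and scaling):
    debiased_params = params_mn + approx_inv_cov.dot(grad_mn)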
"""
def _est_regularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the regularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
Returns
-------
An array of the parameters for the regularized fit
"""
if fit_kwds is None:
raise ValueError("_est_regularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit_regularized(**fit_kwds).params
def _est_unregularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the unregularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit
Returns
-------
An array of the parameters for the fit
"""
if fit_kwds is None:
raise ValueError("_est_unregularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit(**fit_kwds).params
def _join_naive(params_l, threshold=0):
"""joins the results from each run of _est_<type>_naive
and returns the mean estimate of the coefficients
Parameters
----------
params_l : list
A list of arrays of coefficients.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(params_l[0])
partitions = len(params_l)
params_mn = np.zeros(p)
for params in params_l:
params_mn += params
params_mn /= partitions
params_mn[np.abs(params_mn) < threshold] = 0
return params_mn
def _calc_grad(mod, params, alpha, L1_wt, score_kwds):
"""calculates the log-likelihood gradient for the debiasing
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
score_kwds : dict-like or None
Keyword arguments for the score function.
Returns
-------
An array-like object of the same dimension as params
Notes
-----
In general:
gradient l_k(params)
where k corresponds to the index of the partition
For OLS:
    X^T(y - X params)
"""
grad = -mod.score(np.asarray(params), **score_kwds)
grad += alpha * (1 - L1_wt)
return grad
def _calc_wdesign_mat(mod, params, hess_kwds):
"""calculates the weighted design matrix necessary to generate
the approximate inverse covariance matrix
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
hess_kwds : dict-like or None
Keyword arguments for the hessian function.
Returns
-------
An array-like object, updated design matrix, same dimension
as mod.exog
"""
rhess = np.sqrt(mod.hessian_factor(np.asarray(params), **hess_kwds))
return rhess[:, None] * mod.exog
def _est_regularized_debiased(mod, mnum, partitions, fit_kwds=None,
score_kwds=None, hess_kwds=None):
"""estimates the regularized fitted parameters, is the default
estimation_method for class DistributedModel.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
mnum : scalar
Index of current partition.
partitions : scalar
Total number of partitions.
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
A tuple of parameters for regularized fit
An array-like object of the fitted parameters, params
An array-like object for the gradient
A list of array like objects for nodewise_row
A list of array like objects for nodewise_weight
"""
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if fit_kwds is None:
raise ValueError("_est_regularized_debiased currently " +
"requires that fit_kwds not be None.")
else:
alpha = fit_kwds["alpha"]
if "L1_wt" in fit_kwds:
L1_wt = fit_kwds["L1_wt"]
else:
L1_wt = 1
nobs, p = mod.exog.shape
p_part = int(np.ceil((1. * p) / partitions))
params = mod.fit_regularized(**fit_kwds).params
grad = _calc_grad(mod, params, alpha, L1_wt, score_kwds) / nobs
wexog = _calc_wdesign_mat(mod, params, hess_kwds)
nodewise_row_l = []
nodewise_weight_l = []
for idx in range(mnum * p_part, min((mnum + 1) * p_part, p)):
nodewise_row = _calc_nodewise_row(wexog, idx, alpha)
nodewise_row_l.append(nodewise_row)
nodewise_weight = _calc_nodewise_weight(wexog, nodewise_row, idx,
alpha)
nodewise_weight_l.append(nodewise_weight)
return params, grad, nodewise_row_l, nodewise_weight_l
def _join_debiased(results_l, threshold=0):
"""joins the results from each run of _est_regularized_debiased
and returns the debiased estimate of the coefficients
Parameters
----------
results_l : list
A list of tuples each one containing the params, grad,
nodewise_row and nodewise_weight values for each partition.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(results_l[0][0])
partitions = len(results_l)
params_mn = np.zeros(p)
grad_mn = np.zeros(p)
nodewise_row_l = []
nodewise_weight_l = []
for r in results_l:
params_mn += r[0]
grad_mn += r[1]
nodewise_row_l.extend(r[2])
nodewise_weight_l.extend(r[3])
nodewise_row_l = np.array(nodewise_row_l)
nodewise_weight_l = np.array(nodewise_weight_l)
params_mn /= partitions
grad_mn *= -1. / partitions
approx_inv_cov = _calc_approx_inv_cov(nodewise_row_l, nodewise_weight_l)
debiased_params = params_mn + approx_inv_cov.dot(grad_mn)
debiased_params[np.abs(debiased_params) < threshold] = 0
return debiased_params
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results
class DistributedModel(object):
__doc__ = """
Distributed model class
Parameters
----------
partitions : scalar
The number of partitions that the data will be split into.
model_class : statsmodels model class
The model class which will be used for estimation. If None
this defaults to OLS.
init_kwds : dict-like or None
Keywords needed for initializing the model, in addition to
endog and exog.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
        that may vary based on data partition. The current use case
is for WLS and GLS
estimation_method : function or None
The method that performs the estimation for each partition.
If None this defaults to _est_regularized_debiased.
estimation_kwds : dict-like or None
Keywords to be passed to estimation_method.
join_method : function or None
The method used to recombine the results from each partition.
If None this defaults to _join_debiased.
join_kwds : dict-like or None
Keywords to be passed to join_method.
results_class : results class or None
The class of results that should be returned. If None this
defaults to RegularizedResults.
results_kwds : dict-like or None
Keywords to be passed to results class.
Attributes
----------
partitions : scalar
See Parameters.
model_class : statsmodels model class
See Parameters.
init_kwds : dict-like
See Parameters.
init_kwds_generator : generator or None
See Parameters.
estimation_method : function
See Parameters.
estimation_kwds : dict-like
See Parameters.
join_method : function
See Parameters.
join_kwds : dict-like
See Parameters.
results_class : results class
See Parameters.
results_kwds : dict-like
See Parameters.
Notes
-----
Examples
--------
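    A minimal sequential sketch (illustrative only; synthetic OLS-style data
    with the default debiased, regularized estimation):
        >>> import numpy as np
        >>> rs = np.random.RandomState(0)
        >>> exog = rs.standard_normal((400, 3))
        >>> endog = exog.dot([1., 0., -1.]) + rs.standard_normal(400)
        >>> def data_gen(endog, exog, partitions):
        ...     n = endog.shape[0] // partitions
        ...     for i in range(partitions):
        ...         yield endog[i * n:(i + 1) * n], exog[i * n:(i + 1) * n]
        >>> mod = DistributedModel(partitions=4)
        >>> res = mod.fit(data_gen(endog, exog, 4), fit_kwds={"alpha": 0.01})
        >>> params = res.params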
"""
def __init__(self, partitions, model_class=None,
init_kwds=None, estimation_method=None,
estimation_kwds=None, join_method=None, join_kwds=None,
results_class=None, results_kwds=None):
self.partitions = partitions
if model_class is None:
self.model_class = OLS
else:
self.model_class = model_class
if init_kwds is None:
self.init_kwds = {}
else:
self.init_kwds = init_kwds
if estimation_method is None:
self.estimation_method = _est_regularized_debiased
else:
self.estimation_method = estimation_method
if estimation_kwds is None:
self.estimation_kwds = {}
else:
self.estimation_kwds = estimation_kwds
if join_method is None:
self.join_method = _join_debiased
else:
self.join_method = join_method
if join_kwds is None:
self.join_kwds = {}
else:
self.join_kwds = join_kwds
if results_class is None:
self.results_class = RegularizedResults
else:
self.results_class = results_class
if results_kwds is None:
self.results_kwds = {}
else:
self.results_kwds = results_kwds
def fit(self, data_generator, fit_kwds=None, parallel_method="sequential",
parallel_backend=None, init_kwds_generator=None):
"""Performs the distributed estimation using the corresponding
DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
            second element corresponds to an exog array.
fit_kwds : dict-like or None
Keywords needed for the model fitting.
parallel_method : str
            type of distributed estimation to be used, currently
            "sequential" and "joblib" are supported; dask and similar
            clusters can be reached by passing a joblib parallel_backend.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
            that may vary based on data partition. The current use case
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
if fit_kwds is None:
fit_kwds = {}
if parallel_method == "sequential":
results_l = self.fit_sequential(data_generator, fit_kwds,
init_kwds_generator)
elif parallel_method == "joblib":
results_l = self.fit_joblib(data_generator, fit_kwds,
parallel_backend,
init_kwds_generator)
else:
raise ValueError("parallel_method: %s is currently not supported"
% parallel_method)
params = self.join_method(results_l, **self.join_kwds)
# NOTE that currently, the dummy result model that is initialized
# here does not use any init_kwds from the init_kwds_generator event
# if it is provided. It is possible to imagine an edge case where
# this might be a problem but given that the results model instance
# does not correspond to any data partition this seems reasonable.
res_mod = self.model_class([0], [0], **self.init_kwds)
return self.results_class(res_mod, params, **self.results_kwds)
def fit_sequential(self, data_generator, fit_kwds,
init_kwds_generator=None):
"""Sequentially performs the distributed estimation using
the corresponding DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
            second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
            that may vary based on data partition. The current use case
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
results_l = []
if init_kwds_generator is None:
for pnum, (endog, exog) in enumerate(data_generator):
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds)
results_l.append(results)
else:
tup_gen = enumerate(zip(data_generator,
init_kwds_generator))
for pnum, ((endog, exog), init_kwds_e) in tup_gen:
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds, init_kwds_e)
results_l.append(results)
return results_l
def fit_joblib(self, data_generator, fit_kwds, parallel_backend,
init_kwds_generator=None):
"""Performs the distributed estimation in parallel using joblib
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
            second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
            that may vary based on data partition. The current use case
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
from statsmodels.tools.parallel import parallel_func
par, f, n_jobs = parallel_func(_helper_fit_partition, self.partitions)
if parallel_backend is None and init_kwds_generator is None:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is not None and init_kwds_generator is None:
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
elif parallel_backend is not None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
return results_l
class DistributedResults(LikelihoodModelResults):
"""
Class to contain model results
Parameters
----------
model : class instance
Class instance for model used for distributed data,
this particular instance uses fake data and is really
only to allow use of methods like predict.
params : ndarray
Parameter estimates from the fit model.
"""
def __init__(self, model, params):
super(DistributedResults, self).__init__(model, params)
def predict(self, exog, *args, **kwargs):
"""Calls self.model.predict for the provided exog. See
Results.predict.
Parameters
----------
exog : array_like NOT optional
The values for which we want to predict, unlike standard
predict this is NOT optional since the data in self.model
is fake.
*args :
Some models can take additional arguments. See the
predict method of the model for the details.
**kwargs :
Some models can take additional keywords arguments. See the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
return self.model.predict(self.params, exog, *args, **kwargs)
| bsd-3-clause |
joyeshmishra/spark-tk | python/sparktk/frame/ops/to_pandas.py | 14 | 4721 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def to_pandas(self, n=None, offset=0, columns=None):
"""
Brings data into a local pandas dataframe.
Similar to the 'take' function, but puts the data into a pandas dataframe.
Parameters
----------
:param n: (Optional(int)) The number of rows to get from the frame (warning: do not overwhelm the python session
by taking too much)
:param offset: (Optional(int)) The number of rows to skip before copying. Defaults to 0.
:param columns: (Optional(List[str])) Column filter. The list of names to be included. Default is all columns.
:return: (pandas.DataFrame) A new pandas dataframe object containing the taken frame data.
Examples
--------
<hide>
>>> data = [["Fred", "555-1234"],["Susan", "555-0202"],["Thurston","555-4510"],["Judy","555-2183"]]
>>> column_names = ["name", "phone"]
>>> frame = tc.frame.create(data, column_names)
</hide>
Consider the following spark-tk frame, where we have columns for name and phone number:
>>> frame.inspect()
[#] name phone
=======================
[0] Fred 555-1234
[1] Susan 555-0202
[2] Thurston 555-4510
[3] Judy 555-2183
>>> frame.schema
[('name', <type 'str'>), ('phone', <type 'str'>)]
The frame to_pandas() method is used to get a pandas DataFrame that contains the data from the spark-tk frame. Note
that since no parameters are provided when to_pandas() is called, the default values are used for the number of
rows, the row offset, and the columns.
>>> pandas_frame = frame.to_pandas()
>>> pandas_frame
name phone
0 Fred 555-1234
1 Susan 555-0202
2 Thurston 555-4510
3 Judy 555-2183
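    The number of rows, the row offset, and the selected columns can also be restricted. For example, a sketch
    that takes two rows starting at offset 1 and keeps only the "name" column (resulting DataFrame not shown):
    >>> pandas_subset = frame.to_pandas(n=2, offset=1, columns=["name"])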
"""
try:
import pandas
except:
raise RuntimeError("pandas module not found, unable to download. Install pandas or try the take command.")
from sparktk.frame.ops.take import take_rich
result = take_rich(self, n, offset, columns)
headers, data_types = zip(*result.schema)
frame_data = result.data
from sparktk import dtypes
import datetime
date_time_columns = [i for i, x in enumerate(self.schema) if x[1] in (dtypes.datetime, datetime.datetime)]
has_date_time = len(date_time_columns) > 0
# translate our datetime long to datetime, so that it gets into the pandas df as a datetime column
def long_to_date_time(row):
for i in date_time_columns:
if isinstance(row[i], long):
row[i] = datetime.datetime.fromtimestamp(row[i]//1000).replace(microsecond=row[i]%1000*1000)
return row
if (has_date_time):
frame_data = map(long_to_date_time, frame_data)
# create pandas df
pandas_df = pandas.DataFrame(frame_data, columns=headers)
for i, dtype in enumerate(data_types):
dtype_str = _sparktk_dtype_to_pandas_str(dtype)
try:
pandas_df[[headers[i]]] = pandas_df[[headers[i]]].astype(dtype_str)
except (TypeError, ValueError):
if dtype_str.startswith("int"):
# DataFrame does not handle missing values in int columns. If we get this error, use the 'object' datatype instead.
print "WARNING - Encountered problem casting column %s to %s, possibly due to missing values (i.e. presence of None). Continued by casting column %s as 'object'" % (headers[i], dtype_str, headers[i])
pandas_df[[headers[i]]] = pandas_df[[headers[i]]].astype("object")
else:
raise
return pandas_df
def _sparktk_dtype_to_pandas_str(dtype):
"""maps spark-tk schema types to types understood by pandas, returns string"""
from sparktk import dtypes
    if dtype == dtypes.datetime:
return "datetime64[ns]"
elif dtypes.dtypes.is_primitive_type(dtype):
return dtypes.dtypes.to_string(dtype)
return "object"
| apache-2.0 |
abhisg/scikit-learn | sklearn/datasets/__init__.py | 15 | 3741 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
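A minimal usage sketch (illustrative only):
    >>> from sklearn.datasets import load_iris, make_classification
    >>> iris = load_iris()                    # bundled reference dataset
    >>> X, y = iris.data, iris.target
    >>> X_synth, y_synth = make_classification(n_samples=100, n_features=20,
    ...                                        random_state=0)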
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
rbdedu/runway | main.py | 1 | 2274 | import numpy as np
import utils
import ksegment
import Coreset
import matplotlib.pyplot as plt
import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvas
from kivy.properties import StringProperty
# from kivy.config import Config
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.uix.button import Button
# Config.set('graphics', 'width', '720')
# Config.set('graphics', 'height', '1280')
Builder.load_string('''
<ScrollableLabel>:
Label:
size_hint_y: None
height: self.texture_size[1]
text_size: self.width, None
text: root.text
''')
class ScrollableLabel(ScrollView):
text = StringProperty('')
class TestKivyApp(App):
def build(self):
b = BoxLayout(orientation='vertical')
# self.add_widget(BoxLayout(orientation='vertical'))
fig1 = plt.figure()
fig1.suptitle('coreset lines results visualization')
k = 10
epsilon = 0.2
# data = np.genfromtxt("input/haifa-herzliya.csv", delimiter=" ")
# data = np.genfromtxt("input/bus_line_24.csv", delimiter=" ")
data = np.genfromtxt("input/by_foot.csv", delimiter=" ") # to fix the same as the first one
# data = np.genfromtxt("input/natbag-krayot.csv", delimiter=" ")
        # points per segment (each CSV row is one 2D point, so data.size/2 is the row count)
        size_of_input = int(round((data.size/2)/k, 0))
        # prepend a 1-based index column so every point carries its position
        p = np.c_[np.mgrid[1:size_of_input * k + 1], data]
        # build the coreset and derive the k-segment dividing points from it
        coreset, bi = Coreset.build_coreset(p, k, epsilon)
        dividers = ksegment.coreset_k_segment(coreset, k)
scroll_text = ScrollableLabel(text='[%s]' % ', '.join(map(str, coreset)), size_hint_y=None, height=300)
fig1 = utils.visualize_2d_compare(p, dividers, k, epsilon, bi)
wid = FigureCanvas(fig1)
wid.size = (300, 300)
# b.add_widget(l)
# b.add_widget(scroll_text)
l = Label(text='dividing points:\n' + '[%s]' % ', '.join(map(str, dividers)), size_hint_y=None, height=90)
b.add_widget(l)
b.add_widget(wid)
b.add_widget(scroll_text)
Window.size = (720, 1280)
return b
if __name__ == '__main__':
TestKivyApp().run()
| mit |
moutai/scikit-learn | examples/decomposition/plot_sparse_coding.py | 33 | 4038 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size: heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
astraw/mplsizer | demo/demo_gridsizer2.py | 2 | 1658 | import pylab
import numpy
# Demonstration of MplGridSizer use.
def labelax(ax,label):
ax.text(0.5,0.5,label,
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes,
)
fig = pylab.figure(figsize=(8,6))
# Axes placement doesn't matter, but to make sure matplotlib doesn't
# simply return a previous Axes instance with the same bounding box,
# assign a different label to each Axes instance.
import mpl_toolkits.mplsizer as mplsizer
frame = mplsizer.MplSizerFrame( fig )
sizer = mplsizer.MplBoxSizer()#orientation='horizontal')
frame.SetSizer(sizer)#,expand=1)
x = numpy.linspace(0,2*numpy.pi,100)
y = numpy.sin(1*x+numpy.pi/2) + .5*numpy.sin(3*x)
cols = 3
rows = 4
hsizer = mplsizer.MplGridSizer(cols=cols)#,vgap_inch=0.1)
for r in range(rows):
for c in range(cols):
if r==1 and c==1:
# This is how to add an empty element.
ax = mplsizer.MplSizerElement()
else:
# The unique labels are required to generate separate Axes instances.
ax = fig.add_axes([0,0,1,1],label='row %d col %d'%(r,c))
ax.plot(x,y)
labelax(ax,'%d,%d'%(r,c))
if not (r==2 and c==2):
            # Remove tick labels on all Axes instances except one.
pylab.setp(ax,'xticks',[])
pylab.setp(ax,'yticks',[])
# The "border" value below was hand-tuned to not overlap.
hsizer.Add(ax,name='row %d, col %d'%(r,c),all=1,border=0.3,expand=1)
sizer.Add(hsizer,all=1,bottom=1,border=0.25,expand=1,option=1)
frame.Layout() # Trigger the layout within mplsizer.
pylab.show()
| mit |
enguy/FAST-iCLIP | bin/oldscripts/fastclip_icountData.py | 2 | 65861 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os
import cmath
import math
import sys
import numpy as np
import glob
import subprocess
import re
from matplotlib_venn import venn2
import pandas as pd
from collections import defaultdict
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import shutil
from optparse import OptionParser
mpl.rcParams['savefig.dpi'] = 2 * mpl.rcParams['savefig.dpi']
# <codecell>
global sampleName
global outfilepath
global logFile
global logOpen
### File name ###
sampleName=sys.argv[1]
infilepath=os.getcwd() + '/' + 'rawdata/'
outfilepath=os.getcwd() + '/results/%s/'%sampleName
# <codecell>
# Create log and start pipeline
logFile=outfilepath + "runLog"
logOpen=open(logFile, 'w')
# <codecell>
### Parameters ###
iCLIP3pBarcode='AGATCGGAAGAGCGGTTCAG' # Barcode sequence to trim from reads.
q=25 # Minimum quality score to keep during filtering.
p=80 # Percentage of bases that must have quality > q during filtering.
iCLIP5pBasesToTrim=9 # Number of bases to trim from the 5' end of CLIP reads.
k='1' # k=N distinct, valid alignments for each read in bt2 mapping.
threshold=2 # Sum of RT stops (across both replicates) required to keep a position.
expand=15 # Bases to expand around RT position after RT stops are merged.
repeat_index=os.getcwd() + '/docs/repeat/rep' # bt2 index for repeat RNA.
repeatGenomeBuild=os.getcwd()+'/docs/repeat/repeatRNA.fa' # Sequence of repeat index.
repeatAnnotation=os.getcwd()+'/docs/repeat/Hs_repeatIndex_positions.txt' # Repeat annotation file.
start18s=3657
end18s=5527
start5s=6623
end5s=6779
start28s=7935
end28s=12969
rRNAend=13314
threshold_rep=0 # RT stop threshold for repeat index.
index=os.getcwd() + '/docs/hg19/hg19' # bt2 index for mapping.
index_tag='hg19' # Name of bt2 index.
genomeFile=os.getcwd()+'/docs/human.hg19.genome' # Genome file for bedGraph, etc.
genomeForCLIPper='-shg19' # Parameter for CLIPper.
blacklistregions=os.getcwd()+'/docs/wgEncodeDukeMapabilityRegionsExcludable.bed' # Blacklist masker.
repeatregions=os.getcwd()+'/docs/repeat_masker.bed' # Repeat masker.
geneAnnot=glob.glob(os.getcwd()+'/docs/genes_types/*') # List of genes by type.
snoRNAmasker=os.getcwd()+'/docs/snoRNA_reference/snoRNAmasker_formatted_5pExtend.bed' # snoRNA masker file.
miRNAmasker=os.getcwd()+'/docs/miR_sort_clean.bed' # miRNA masker file.
fivePUTRBed=os.getcwd()+'/docs/5pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
threePUTRBed=os.getcwd()+'/docs/3pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
cdsBed=os.getcwd()+'/docs/Exons_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
utrFile=os.getcwd()+'/docs/hg19_ensembl_UTR_annotation.txt' # UTR annotation file.
genesFile=os.getcwd()+'/docs/hg19_ensembl_genes.txt' # Gene annotation file.
sizesFile=os.getcwd()+'/docs/hg19.sizes' # Genome sizes file.
snoRNAindex=os.getcwd()+'/docs/snoRNA_reference/sno_coordinates_hg19_formatted.bed' # snoRNA coordinate file.
CLIPPERoutNameDelim='_' # Delimiter that for splitting gene name in the CLIPper windows file.
# <codecell>
import datetime
now=datetime.datetime.now()
logOpen.write("Timestamp:%s\n"%str(now))
logOpen.write("\n###Parameters used###\n")
logOpen.write("3' barcode:%s\n'"%iCLIP3pBarcode)
logOpen.write("Minimum quality score (q):%s\n"%q)
logOpen.write("Percentage of bases with > q:%s\n"%p)
logOpen.write("5' bases to trim:%s\n'"%iCLIP5pBasesToTrim)
logOpen.write("k distinct, valid alignments for each read in bt2 mapping:%s\n"%k)
logOpen.write("Threshold for minimum number of RT stops:%s\n"%threshold)
logOpen.write("Bases for expansion around conserved RT stops:%s\n"%expand)
logOpen.write("\n\n\n")
# <codecell>
print "Processing sample %s" %(sampleName)
logOpen.write("Processing sample: "+sampleName+'\n')
read1=infilepath+sampleName+'_R1.fastq'
read2=infilepath+sampleName+'_R2.fastq'
unzippedreads=[read1,read2]
# <codecell>
def trimReads3p(unzippedreads,adapter3p):
# Usage: Trims a specified adapter sequence from the 3p end of the reads.
# Input: List of fastq files.
# Output: List of 3p trimmed files.
trimparam='-a'+adapter3p # Adapter string
trimmedReads=[]
try:
for inread in unzippedreads:
outread=inread.replace("rawdata/", "results/%s/"%sampleName)
outread=outread.replace(".fastq", "_3ptrimmed.fastq")
process=subprocess.Popen(['fastx_clipper',trimparam,'-n','-l33','-Q64','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
logOpen.write("Trim 3p end of reads.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
trimmedReads=trimmedReads+[outread]
return trimmedReads
except:
logOpen.write("Problem with 3p trimming.\n")
print "Problem with 3p trimming."
print "Trim 3p adapter from reads."
trimmedReads3p=trimReads3p(unzippedreads,iCLIP3pBarcode)
# <codecell>
def qualityFilter(trim3pReads,q,p):
# Usage: Filters reads based upon quality score.
    # Input: List of fastq file names as well as the quality parameters p and q.
# Output: List of modified fastq file names.
qualityparam='-q'+str(q)
percentrageparam='-p'+str(p)
filteredReads=[]
try:
for inread in trim3pReads:
outread=inread.replace(".fastq", "_filter.fastq")
process=subprocess.Popen(['fastq_quality_filter',qualityparam,percentrageparam,'-Q64','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform quality filtering.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
filteredReads=filteredReads+[outread]
return filteredReads
except:
logOpen.write("Problem with quality filter.\n")
print "Problem with quality filter."
print "Perform quality filtering."
filteredReads=qualityFilter(trimmedReads3p,q,p)
# <codecell>
def dupRemoval(filteredReads):
# Usage: Removes duplicate reads.
# Input: List of fastq file names.
    # Output: List of duplicate-collapsed reads converted back to FASTQ format.
program=os.getcwd() + '/bin/fasta_to_fastq.pl'
noDupes=[]
try:
for inread in filteredReads:
outread=inread.replace(".fastq","_nodupe.fasta")
process=subprocess.Popen(['fastx_collapser','-Q64','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform duplicate removal.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
fastqOut=outread.replace('.fasta', '.fastq') # fastx_collapser returns fasta files, which are then converted to fastq.
outfh=open(fastqOut, 'w')
process=subprocess.Popen(['perl',program,outread],stdout=outfh)
process.communicate() # Wait for the process to complete.
os.remove(outread) # Remove the remaining .fasta file.
noDupes=noDupes+[fastqOut]
return noDupes
except:
logOpen.write("Problem with duplicate removal.\n")
print "Problem with duplicate removal."
print "Perform duplicate removal."
nodupReads=dupRemoval(filteredReads)
# <codecell>
def trimReads5p(nodupes,n):
# Usage: Trims a specified number of bases from the 5' end of each read.
# Input: List of fastq files.
# Output: List of 5p trimmed files.
trimparam='-f'+str(n)
trimmedReads=[]
try:
for inread in nodupes:
outread=inread.replace(".fastq", "_5ptrimmed.fastq")
process=subprocess.Popen(['fastx_trimmer', trimparam, '-Q64', '-i', inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform 5' barcode trimming.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
trimmedReads=trimmedReads+[outread]
return trimmedReads
except:
logOpen.write("Problem with 5' barcode trimming.\n")
print "Problem with 5' barcode trimming."
print "Perform 5' barcode trimming."
trimmedReads5p=trimReads5p(nodupReads,iCLIP5pBasesToTrim)
# <codecell>
def runBowtie(fastqFiles,index,index_tag):
# Usage: Read mapping to reference.
# Input: Fastq files of replicate trimmed read files.
    # Output: Paths to the mapped SAM file and unmapped-reads FASTQ for each input file.
program='bowtie2'
mappedReads=[]
unMappedReads=[]
try:
for infastq in fastqFiles:
outfile=infastq.replace(".fastq","_mappedTo%s.sam"%index_tag)
unmapped=infastq.replace(".fastq","_notMappedTo%s.fastq"%index_tag)
process=subprocess.Popen([program,'-x',index,'-k',k,'-U',infastq,'--un',unmapped,'-S',outfile],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout,stderr=process.communicate()
logOpen.write("Perform mapping to %s index.\n"%index_tag)
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
mappedReads = mappedReads + [outfile]
unMappedReads = unMappedReads + [unmapped]
return (mappedReads,unMappedReads)
except:
logOpen.write("Problem with mapping.\n")
print "Problem with mapping."
print "Run mapping to repeat index."
mappedReads_rep,unmappedReads_rep=runBowtie(trimmedReads5p,repeat_index,'repeat')
# <codecell>
def runSamtools(samfiles):
# Usage: Samfile processing.
# Input: Sam files from Bowtie mapping.
# Output: Sorted bedFiles.
program = 'samtools'
program2 = 'bamToBed'
outBedFiles=[]
try:
for samfile in samfiles:
bamfile = samfile.replace('.sam','.bam')
proc = subprocess.Popen( [program,'view','-bS','-o', bamfile, samfile])
proc.communicate()
bamfile_sort = bamfile.replace('.bam','_sorted')
proc2 = subprocess.Popen([program,'sort',bamfile, bamfile_sort])
proc2.communicate()
bedFile = bamfile_sort.replace('_sorted', '_withDupes.bed')
outfh = open(bedFile,'w')
proc3 = subprocess.Popen( [program2,'-i', bamfile_sort+'.bam'],stdout=outfh)
proc3.communicate()
outBedFiles=outBedFiles+[bedFile]
return outBedFiles
except:
logOpen.write("Problem with samtools.\n")
print "Problem with samtools."
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles_rep=runSamtools(mappedReads_rep)
# <codecell>
def seperateStrands(mappedReads):
    # Usage: Separate positive and negative strands.
# Input: Paths to two bed files from Samtools.
# Output: Paths to bed files isolated by strand.
negativeStrand=[]
positiveStrand=[]
for mapFile in mappedReads:
with open(mapFile, 'r') as infile:
neg_strand=mapFile.replace('.bed','_neg.bed')
pos_strand=mapFile.replace('.bed','_pos.bed')
neg = open(neg_strand, 'w')
pos = open(pos_strand, 'w')
negativeStrand=negativeStrand+[neg_strand]
positiveStrand=positiveStrand+[pos_strand]
for line in infile:
if str(line.strip().split('\t')[5]) == '-':
neg.write(line)
elif str(line.strip().split('\t')[5]) == '+':
pos.write(line)
return (negativeStrand,positiveStrand)
def modifyNegativeStrand(negativeStrandReads):
# Usage: For negative stranded reads, ensure 5' position (RT stop) is listed first.
    # Input: Bed file paths for all negative-strand reads.
# Output: Paths to modified bed files.
negativeStrandEdit=[]
for negativeRead in negativeStrandReads:
neg_strand_edited=negativeRead.replace('_neg.bed','_negEdit.bed')
negativeStrandEdit=negativeStrandEdit+[neg_strand_edited]
neg_edit = open(neg_strand_edited, 'w')
with open(negativeRead, 'r') as infile:
for line in infile:
chrom,start,end,name,quality,strand=line.strip().split('\t')
neg_edit.write('\t'.join((chrom,end,str(int(end)+30),name,quality,strand))+'\n')
return negativeStrandEdit
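# Illustrative note (hypothetical values): for a negative-strand read recorded as chr1 100 130,
# the 5' end of the read (and hence the RT stop) is the genomic end coordinate, so the edited
# record becomes chr1 130 160 and downstream steps can treat column 2 as the RT stop uniformly.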
def isolate5prime(strandedReads):
# Usage: Isolate only the Chr, 5' position (RT stop), and strand.
    # Input: Bed file paths to strand-separated reads.
    # Output: Paths to RT stop files.
RTstops=[]
for reads in strandedReads:
RTstop=reads.replace('.bed','_RTstop.bed')
f = open(RTstop, 'w')
with open(reads, 'r') as infile:
RTstops=RTstops+[RTstop]
for line in infile:
chrom,start,end,name,quality,strand=line.strip().split('\t')
f.write('\t'.join((chrom,start,strand))+'\n')
return RTstops
print "RT stop isolation (repeat)."
logOpen.write("RT stop isolation (repeat).\n")
readsByStrand_rep=seperateStrands(mappedBedFiles_rep)
negativeRTstop_rep=isolate5prime(modifyNegativeStrand(readsByStrand_rep[0]))
positiveRTstop_rep=isolate5prime(readsByStrand_rep[1])
# <codecell>
def fileCat(destinationFile,fileList):
f = open(destinationFile, "w")
for tempfile in fileList:
readfile = open(tempfile, "r")
f.write(readfile.read())
readfile.close()
f.close()
def RTcounts(RTfile):
posRT_R1=pd.DataFrame(pd.read_table(RTfile,index_col=None,header=None,sep='\t'))
posRT_R1.columns=['Chr','Start','Strand']
cts=posRT_R1.groupby(['Chr','Start']).size()
return cts
def mergeRT(RTstopFiles,outfilename,threshold,expand,strand):
# Usage: Merge RT stops between replicates and keep only those positions that exceed threshold.
# Input: Files with RT stops for each replicate, outfile, threshold, strand, and bases to expand around RT stop.
# Output: None. Writes merged RT stop file.
cts_R1=RTcounts(RTstopFiles[0])
cts_R2=RTcounts(RTstopFiles[1])
m=pd.concat([cts_R1,cts_R2],axis=1,join='inner')
m.columns=['Rep_1','Rep_2']
m['Sum']=m['Rep_1']+m['Rep_2']
m_filter=m[m['Sum']>threshold]
f = open(outfilename, 'w')
for i in m_filter.index:
chrom=i[0]
RT=i[1]
count=m_filter.loc[i,'Sum']
if RT > expand:
read='\t'.join((chrom,str(int(RT)-expand),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
else:
read='\t'.join((chrom,str(int(RT)),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
f.write(read*(count))
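# Illustrative example (hypothetical values): with expand=15 and threshold=2 (the genomic
# defaults above), an RT stop at chr1:1000 on the + strand seen a total of 3 times across
# both replicates passes the filter and is written as three identical lines of the form
#   chr1    985    1015    CLIPread    255    +
# so that downstream coverage reflects the RT-stop count.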
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_repeat_positivereads.mergedRT'
strand='+'
mergeRT(positiveRTstop_rep,posMerged,threshold_rep,expand,strand)
negMerged=outfilepath+sampleName+'_repeat_negativereads.mergedRT'
strand='-'
mergeRT(negativeRTstop_rep,negMerged,threshold_rep,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold_rep+'_repeat_allreads.mergedRT.bed'
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
print "Run mapping to %s."%index_tag
mappedReads,unmappedReads=runBowtie(unmappedReads_rep,index,index_tag)
# <codecell>
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles=runSamtools(mappedReads)
# <codecell>
def runRepeatMask(mappedReads,repeatregions):
# Usage: Remove repeat regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools) with blacklist regions already removed.
# Output: Bedfile with repeat regions removed.
program='intersectBed'
masked=[]
try:
for bedIn in mappedReads:
noRepeat=bedIn.replace('.bed','_noRepeat.bed')
outfh=open(noRepeat, 'w')
proc=subprocess.Popen([program,'-a',bedIn,'-b',repeatregions,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
masked=masked+[noRepeat]
return (masked)
except:
print "Problem with repeat masking."
logOpen.write("Problem with repeat masking.\n")
def runBlacklistRegions(mappedReads,blacklistregions):
# Usage: Remove blacklisted regions from bedfile following mapping.
# Input: .bed file after mapping (duplicates removed by samtools).
# Output: Bedfile with blacklisted regions removed.
program='intersectBed'
blackListed=[]
try:
for bedIn in mappedReads:
noBlacklist=bedIn.replace('.bed','_noBlacklist.bed')
outfh=open(noBlacklist, 'w')
proc=subprocess.Popen([program,'-a',bedIn,'-b',blacklistregions,'-v'],stdout=outfh)
proc.communicate()
outfh.close()
blackListed=blackListed+[noBlacklist]
return (blackListed)
except:
print "Problem with blacklist."
logOpen.write("Problem with blacklist.\n")
print "Run repeat and blacklist region masker."
logOpen.write("Run repeat and blacklist masker.\n")
blacklistedBedFiles=runBlacklistRegions(mappedBedFiles,blacklistregions)
maskedBedFiles=runRepeatMask(blacklistedBedFiles,repeatregions)
# <codecell>
print "RT stop isolation."
logOpen.write("RT stop isolation.\n")
readsByStrand=seperateStrands(maskedBedFiles)
negativeRTstop=isolate5prime(modifyNegativeStrand(readsByStrand[0]))
positiveRTstop=isolate5prime(readsByStrand[1])
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_%s_positivereads.mergedRT'%index_tag
strand='+'
mergeRT(positiveRTstop,posMerged,threshold,expand,strand)
negMerged=outfilepath+sampleName+'_%s_negativereads.mergedRT'%index_tag
strand='-'
mergeRT(negativeRTstop,negMerged,threshold,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold+'_%s_allreads.mergedRT.bed'%index_tag
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
def runCLIPPER(RTclusterfile,genome,genomeFile):
    # Usage: Process the mergedRT file and pass through the CLIPper FDR script.
# Input: Merged RT file.
# Output: CLIPper input (.bed) file and output file.
program='bedToBam'
program2='samtools'
program3='bamToBed'
program4='clipper'
bamfile=RTclusterfile.replace('.bed','.bam')
outfh=open(bamfile, 'w')
proc=subprocess.Popen([program,'-i',RTclusterfile,'-g',genomeFile],stdout=outfh)
proc.communicate()
bamfile_sort=bamfile.replace('.bam','.srt')
proc2=subprocess.Popen([program2,'sort',bamfile,bamfile_sort])
proc2.communicate()
bamfile_sorted=bamfile_sort+'.bam'
mapStats=bamfile_sorted.replace('.srt.bam','.mapStats.txt')
outfh=open(mapStats, 'w')
proc3=subprocess.Popen([program2,'flagstat',bamfile_sorted],stdout=outfh)
proc3.communicate()
proc4=subprocess.Popen([program2,'index',bamfile_sorted])
proc4.communicate()
CLIPPERin=bamfile_sorted.replace('.srt.bam','_CLIPPERin.bed')
outfh=open(CLIPPERin, 'w')
proc5=subprocess.Popen([program3,'-i',bamfile_sorted],stdout=outfh)
proc5.communicate()
CLIPPERout=CLIPPERin.replace('_CLIPPERin.bed','_CLIP_clusters')
proc6=subprocess.Popen([program4,'--bam',bamfile_sorted,genome,'--outfile=%s'%CLIPPERout],)
proc6.communicate()
outfh.close()
return (CLIPPERin,CLIPPERout)
def makeGeneNameDict(fi):
# Usage: Make a dictionary that maps RT stop to gene name.
# Input: File path to intersected CLIPper windows and input RT stop coordinates.
    # Output: Dictionary mapping RT stop to gene name.
nameDict={}
with open(fi, 'r') as infile:
for read in infile:
elementList=read.strip().split('\t')
RT_id='_'.join((elementList[0],elementList[1],elementList[2],elementList[5]))
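            # e.g. (hypothetical values) a read chr1 985 1015 on the + strand yields RT_id 'chr1_985_1015_+'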
if RT_id not in nameDict:
geneName=elementList[9].strip().split(CLIPPERoutNameDelim)[0]
nameDict[RT_id]=geneName
return nameDict
def modCLIPPERout(CLIPPERin,CLIPPERout):
# Usage: Process the CLIPper output and isolate lowFDR reads based upon CLIPper windows.
# Input: .bed file passed into CLIPper and the CLIPper windows file.
    # Output: Low FDR reads recovered using the CLIPper windows file, genes per cluster, gene list of CLIPper clusters, and CLIPper windows as .bed.
program='intersectBed'
CLIPperOutBed=CLIPPERout+'.bed'
CLIPpeReadsPerCluster=CLIPPERout+'.readsPerCluster'
CLIPpeGeneList=CLIPPERout+'.geneNames'
f = open(CLIPperOutBed,'w')
g = open(CLIPpeReadsPerCluster,'w')
h = open(CLIPpeGeneList,'w')
with open(CLIPPERout,'r') as infile:
for line in infile:
try:
                # Note that different versions of CLIPper report the gene name differently, so we must handle this.
chrom,start,end,name,stats,strand,start_2,end_2 = line.strip().split('\t')
if CLIPPERoutNameDelim=='_':
readPerCluster=name.strip().split(CLIPPERoutNameDelim)[2]
else:
readPerCluster=(name.strip().split(CLIPPERoutNameDelim)[1]).split('_')[2]
geneName=name.strip().split(CLIPPERoutNameDelim)[0]
f.write('\t'.join((chrom,start,end,name,stats,strand))+'\n')
g.write((readPerCluster+'\n'))
h.write((geneName+'\n'))
except:
print ""
f.close()
g.close()
h.close()
    # Intersect input reads with the CLIPper windows, report the full result for both, include strand, and do not duplicate reads from -a if they intersect with multiple windows.
clusterWindowInt=CLIPperOutBed.replace('.bed','_fullClusterWindow.bed')
outfh=open(clusterWindowInt,'w')
proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-wb','-s'],stdout=outfh)
proc.communicate()
outfh.close()
# Use the full window intersection to make a dictionary mapping RTstop to gene name.
nameDict=makeGeneNameDict(clusterWindowInt)
# Intersect input reads with CLIPper windows, but only report one intersection per read (as reads can overlap with multiple windows).
clusterWindowIntUniq=CLIPperOutBed.replace('.bed','_oneIntPerRead.bed')
outfh=open(clusterWindowIntUniq,'w')
proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-s','-u'],stdout=outfh)
proc.communicate()
outfh.close()
    # Process the uniquely intersected RT stops by adding the gene name.
CLIPPERlowFDR=CLIPperOutBed.replace('.bed','_lowFDRreads.bed')
outfh=open(CLIPPERlowFDR,'w')
with open(clusterWindowIntUniq, 'r') as infile:
for read in infile:
bed=read.strip().split('\t')
RT_id='_'.join((bed[0],bed[1],bed[2],bed[5]))
geneName=nameDict[RT_id]
outfh.write('\t'.join((bed[0],bed[1],bed[2],geneName,bed[4],bed[5],'\n')))
outfh.close()
infile.close()
return (CLIPPERlowFDR,CLIPpeReadsPerCluster,CLIPpeGeneList,CLIPperOutBed)
print "Run CLIPper."
logOpen.write("Run CLIPper.\n")
CLIPPERio=runCLIPPER(negAndPosMerged,genomeForCLIPper,genomeFile)
CLIPPERin=CLIPPERio[0]
CLIPPERout=CLIPPERio[1]
clipperStats=modCLIPPERout(CLIPPERin,CLIPPERout)
CLIPPERlowFDR=clipperStats[0] # Low FDR reads returned filtred through CLIPper windows
CLIPpeReadsPerCluster=clipperStats[1] # Number of reads per CLIPper cluster
CLIPpeGeneList=clipperStats[2] # Gene names returned from the CLIPper file
CLIPperOutBed=clipperStats[3] # CLIPper windows as a bed file
# <codecell>
def getBedCenterPoints(inBed):
    # Usage: Obtain center coordinates of bedFile.
    # Input: BedFile.
    # Output: Center coordinates returned.
outBed=inBed.replace('.bed','_centerCoord.bed')
f=open(outBed, 'w')
with open(inBed, 'r') as infile:
for line in infile:
elementList=line.strip().split('\t')
f.write('\t'.join((elementList[0],str(int(elementList[1])+expand),str(int(elementList[1])+expand+1),elementList[3],elementList[4],elementList[5],'\n')))
f.close()
return outBed
def cleanBedFile(inBed):
# Usage: Sort and recover only first 6 fields from a bed file.
# Input: BedFile.
# Output: Sorted bedFile with correct number of fields.
program='sortBed'
CLIPperOutBed=inBed.replace('.bed','_cleaned.bed')
sortedBed=CLIPperOutBed.replace('_cleaned.bed','_cleaned_sorted.bed')
f=open(CLIPperOutBed, 'w')
with open(inBed, 'r') as infile:
for line in infile:
elementList=line.strip().split('\t')
f.write('\t'.join((elementList[0],elementList[1],elementList[2],elementList[3],elementList[4],elementList[5],'\n')))
f.close()
outfh=open(sortedBed, 'w')
proc=subprocess.Popen([program, '-i', CLIPperOutBed],stdout=outfh)
proc.communicate()
outfh.close()
return sortedBed
def makeBedGraph(lowFDRreads,sizesFile):
# Usage: From a bedFile, generate a bedGraph and bigWig.
# Input: BedFile.
# Output: BedGraph file.
program='genomeCoverageBed'
program2=os.getcwd() + '/bin/bedGraphToBigWig'
cleanBed=cleanBedFile(lowFDRreads)
outname=cleanBed.replace('.bed','.bedgraph')
outname2=cleanBed.replace('.bed','.bw')
outfh=open(outname,'w')
proc=subprocess.Popen([program,'-bg','-split','-i',cleanBed,'-g',sizesFile],stdout=outfh)
proc.communicate()
outfh2=open(outname2,'w')
proc2=subprocess.Popen([program2,outname,sizesFile,outname2],stdout=subprocess.PIPE)
proc2.communicate()
return outname
print "Make bedGraph"
logOpen.write("Make bedGraph.\n")
bedGraphCLIPout=makeBedGraph(CLIPPERlowFDR,genomeFile)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
# <codecell>
def filterSnoRNAs(proteinCodingReads,snoRNAmasker,miRNAmasker):
# Usage: Filter snoRNA and miRNAs from protein coding reads.
# Input: .bed file with protein coding reads.
# Output: snoRNA and miR filtered .bed file.
program='intersectBed'
proteinWithoutsnoRNAs=proteinCodingReads.replace('.bed','_snoRNAremoved.bed')
proteinWithoutmiRNAs=proteinWithoutsnoRNAs.replace('.bed','_miRNAremoved.bed')
outfh=open(proteinWithoutsnoRNAs, 'w')
proc=subprocess.Popen([program,'-a',proteinCodingReads,'-b',snoRNAmasker,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
outfh=open(proteinWithoutmiRNAs, 'w')
proc=subprocess.Popen([program,'-a',proteinWithoutsnoRNAs,'-b',miRNAmasker,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
return (proteinWithoutmiRNAs)
def getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists):
# Usage: Given a list of genes, return all reads for the associated genes.
# Input: Gene list and the path to lowFDR read file.
    # Output: List of reads associated with the given genes.
lowFDRgenelist=[]
for path in pathToGeneLists:
outfile=path+'_LowFDRreads.bed'
proc=subprocess.Popen('grep -F -f %s %s > %s'%(path,CLIPPERlowFDR,outfile),shell=True)
proc.communicate()
return_code=proc.wait() # *** Remove later. ***
lowFDRgenelist=lowFDRgenelist+[outfile]
return lowFDRgenelist
def compareLists(list1,list2,outname):
# Usage: Compare gene lists and output matches to the file.
# Input: Two gene lists.
    # Output: Path to a file containing the matching genes.
f=open(list1,'r')
g=open(list2,'r')
commonGenes=set(f.readlines()) & set(g.readlines())
geneCategory=outname.split('.')[1]
outputName=outfilepath+'clipGenes_'+geneCategory
outfh=open(outputName,'w')
for gene in commonGenes:
outfh.write(gene)
outfh.close()
return outputName
def getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot):
# Usage: Get all genes listed under each type, compare to CLIPper targets.
    # Input: CLIPper gene list and the paths to the gene-type annotation files.
# Output: Path to file containing all CLIPper genes of each type.
geneTypes=[]
for genepath in geneAnnot:
lowFDRgenes=compareLists(CLIPpeGeneList,genepath,os.path.split(genepath)[1])
geneTypes=geneTypes+[lowFDRgenes]
return geneTypes
print "Partition reads by type."
logOpen.write("Partition reads by type.\n")
pathToGeneLists=getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot)
pathToReadLists=getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists)
proteinCodingReads=outfilepath+'clipGenes_proteinCoding_LowFDRreads.bed'
proteinBedGraph=makeBedGraph(proteinCodingReads,genomeFile)
filteredProteinCodingCenters=filterSnoRNAs(getBedCenterPoints(proteinCodingReads),snoRNAmasker,miRNAmasker)
filteredProteinCentersBedGraph=makeBedGraph(filteredProteinCodingCenters,genomeFile)
lincRNAReads=outfilepath+'clipGenes_lincRNA_LowFDRreads.bed'
filteredLincRNACenters=filterSnoRNAs(getBedCenterPoints(lincRNAReads),snoRNAmasker,miRNAmasker)
# <codecell>
# --- #
# <codecell>
def sortFilteredBed(bedFile):
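    # Usage: Count reads per gene in a snoRNA/miRNA-filtered .bed file.
    # Input: Path to the filtered .bed file (gene name encoded in the CLIPper window name).
    # Output: Series of read counts per gene, sorted in descending order.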
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
geneCounts=countHitsPerGene(bf)
return geneCounts
def countHitsPerGene(bf):
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
return geneCounts
def getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex):
program='intersectBed'
bedFile=outfilepath+'clipGenes_snoRNA_LowFDRreads.bed'
outfh=open(bedFile, 'w')
proc=subprocess.Popen([program,'-a',CLIPPERlowFDRcenters,'-b',snoRNAindex,'-s','-wa','-wb'],stdout=outfh)
proc.communicate()
outfh.close()
return bedFile
def countSnoRNAs(bedFile_sno):
bf=pd.DataFrame(pd.read_table(bedFile_sno,header=None))
bf.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
geneCounts=bf.groupby('name_snoRNA').size()
geneCounts.sort(ascending=False)
return geneCounts
def countRemainingGeneTypes(remaining):
for bedFile in remaining:
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
head,fname=os.path.split(bedFile)
geneType=fname.split("_")[1]
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_%s'%geneType
geneCounts.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "Generate sorted gene lists by gene type."
logOpen.write("Generate sorted gene lists by gene type.\n")
bedFile_pc=outfilepath+"clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_pc=sortFilteredBed(bedFile_pc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_proteinCoding'
geneCounts_pc.to_csv(outfilepathToSave)
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_linc=sortFilteredBed(bedFile_linc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_lincRNA'
geneCounts_linc.to_csv(outfilepathToSave)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
bedFile_sno=getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex)
geneCounts_sno=countSnoRNAs(bedFile_sno)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_snoRNA'
geneCounts_sno.to_csv(outfilepathToSave)
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
countRemainingGeneTypes(remaining)
# <codecell>
def makeClusterCenter(windowsFile):
# Usage: Generate a file of cluster centers.
# Input: Raw CLIPper output file.
# Output: File with coordinates for the center of each CLIPper cluster.
cleanBed = cleanBedFile(windowsFile)
centers=cleanBed.replace('.bed','.clusterCenter')
f = open(centers, 'w')
with open(cleanBed, 'r') as infile:
for line in infile:
elementList = line.strip().split('\t')
diff=abs(int((int(elementList[1])-int(elementList[2]))/2))
f.write(elementList[0]+'\t'+str(int(elementList[1])+diff)+'\t'+str(int(elementList[1])+diff+1)+'\n')
f.close()
return centers
def getClusterIntensity(bedGraph,centerCoordinates):
    # Usage: Generate a matrix of read intensity values around CLIPper cluster center.
# Input: BedGraph and cluster center file.
# Output: Generates a matrix, which is passed into R.
program=os.getcwd() + '/bin/grep_chip-seq_intensity.pl'
program2='wait'
proc=subprocess.Popen(['perl',program, centerCoordinates, bedGraph],)
proc.communicate()
logOpen.write("Waiting for Cluster Intensity file completion...\n")
proc2=subprocess.Popen(program2,shell=True)
proc2.communicate()
print "Get binding intensity around cluster centers."
logOpen.write("Get binding intensity around cluster centers.\n")
bedGraphCLIPin=makeBedGraph(CLIPPERin,genomeFile)
centerCoordinates=makeClusterCenter(CLIPperOutBed)
getClusterIntensity(bedGraphCLIPin,centerCoordinates)
# <codecell>
def partitionReadsByUTR(infile,UTRmask,utrReads,notutrReads):
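    # Usage: Split reads by strand-specific overlap with a UTR/CDS annotation.
    # Input: Reads .bed file, annotation .bed file, and two output file paths.
    # Output: None. Writes overlapping reads to utrReads and non-overlapping reads to notutrReads.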
program = 'intersectBed'
outfh = open(utrReads,'w')
proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-u','-s'],stdout=outfh)
proc.communicate()
outfh.close()
outfh = open(notutrReads,'w')
proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
def extractUTRs(bedIn,fivePUTRBed,threePUTRBed,cdsBed):
# Usage: Extract all UTR specific reads from the input file.
# Input: .bed file
# Output: Mutually exclusive partitions of the input file.
fivePreads = bedIn.replace('.bed', '_5p.bed')
notFivePreads = bedIn.replace('.bed', '_NOT5p.bed')
partitionReadsByUTR(bedIn,fivePUTRBed,fivePreads,notFivePreads)
threePreads = bedIn.replace('.bed', '_3p.bed')
notThreePreads = bedIn.replace('.bed', '_NOT3p.bed')
partitionReadsByUTR(notFivePreads,threePUTRBed,threePreads,notThreePreads)
CDSreads = bedIn.replace('.bed', '_cds.bed')
notCDSreads = bedIn.replace('.bed', '_NOTcds.bed')
partitionReadsByUTR(notThreePreads,cdsBed,CDSreads,notCDSreads)
return (fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads)
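# Note: the partition above is sequential, so reads are assigned to the 5'UTR first, then the
# 3'UTR, then the CDS; whatever remains (notCDSreads) is treated as intronic downstream.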
print "Intron and UTR analysis."
logOpen.write("Intron and UTR analysis.\n")
fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads=extractUTRs(filteredProteinCodingCenters,fivePUTRBed,threePUTRBed,cdsBed)
geneCounts_5p=sortFilteredBed(fivePreads)
geneCounts_3p=sortFilteredBed(threePreads)
geneCounts_cds=sortFilteredBed(CDSreads)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_5pUTR'
geneCounts_5p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_3pUTR'
geneCounts_3p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_CDS'
geneCounts_cds.to_csv(outfilepathToSave)
# <codecell>
def makeTab(bedGraph,genesFile,sizesFile):
program = os.getcwd() + '/bin/bedGraph2tab.pl'
program2 = 'wait'
outfile=bedGraph.replace('.bedgraph','.tab')
proc = subprocess.Popen(['perl',program,genesFile,sizesFile,bedGraph,outfile],)
proc.communicate()
proc2 = subprocess.Popen(program2,shell=True)
proc2.communicate()
return outfile
def makeAvgGraph(bedGraph,utrFile,genesFile,sizesFile):
    # Usage: Generate a matrix of read intensity values across gene body.
    # Input: BedGraph.
    # Output: Generates two matrices.
program= os.getcwd() + '/bin/averageGraph_scaled_tab.pl'
program2 = 'wait'
tabFile=makeTab(bedGraph,genesFile,sizesFile)
outhandle=tabFile.replace('.tab','_UTRs')
proc = subprocess.Popen(['perl',program,utrFile,tabFile,tabFile,outhandle],)
proc.communicate()
proc2 = subprocess.Popen(program2,shell=True)
proc2.communicate()
print "Gene body analysis."
logOpen.write("Gene body analysis.\n")
bedGraphProtein=makeBedGraph(bedFile_pc,genomeFile)
makeAvgGraph(bedGraphProtein,utrFile,genesFile,sizesFile)
# <codecell>
def getGeneStartStop(bedFile,geneRef):
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','End','Strand']]
outfilepathToSave=bedFile.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "ncRNA gene body anaysis."
geneStartStopRepo=os.getcwd()+'/docs/all_genes.txt'
geneRef=pd.DataFrame(pd.read_table(geneStartStopRepo))
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
for bedFile in remaining:
st_stop=getGeneStartStop(bedFile,geneRef)
# lincRNA file processing
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
bf=pd.DataFrame(pd.read_table(bedFile_linc,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','Stop','Strand']]
outfilepathToSave=bedFile_linc.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repeat_genome_size=len(repeat_genome[1])
repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
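    # e.g. 'ACGTAC'[1:4] returns 'CGT' (index 4 excluded), so a repeat spanning indices
    # IndexStart..IndexEnd inclusive must be sliced as bases[IndexStart:IndexEnd+1].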
return (repeat_genome_bases,repeatAnnotDF)
def readBed(path):
bedFile = pd.read_table(path,dtype=str,header=None)
bedFile.columns=['Index','Start','Stop','Name','QS','Strand']
bedFile['Start']=bedFile['Start'].astype(int)
return bedFile
print "Record repeat RNA."
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
repeatAnnotDF.set_index('Name',inplace=True,drop=False)
# Get merged data for repeat index.
repeatMerged=glob.glob(outfilepath+"*repeat_allreads.mergedRT.bed")
rep=pd.read_table(repeatMerged[0],dtype=str,header=None)
rep.columns=['Rep_index','Start','Stop','Read_name','Q','Strand']
rep['RT_stop']=rep['Start'].astype(int)+expand
for ix in repeatAnnotDF.index:
end=repeatAnnotDF.loc[ix,'IndexEnd']
repName=repeatAnnotDF.loc[ix,'Name']
gene_hits=rep[(rep['RT_stop']<int(repeatAnnotDF.loc[ix,'IndexEnd']))&(rep['RT_stop']>int(repeatAnnotDF.loc[ix,'IndexStart']))]
gene_hits['Repeat_End']=repeatAnnotDF.loc[ix,'IndexEnd']
gene_hits['Repeat_Start']=repeatAnnotDF.loc[ix,'IndexStart']
outfilepathToSave=outfilepath + '/PlotData_RepeatRNAreads_%s'%repName
gene_hits.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repeat_genome_size=len(repeat_genome[1])
repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
return (repeat_genome_bases,repeatAnnotDF)
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
# <codecell>
def lineCount(filename):
i=0
with open(filename) as f:
for i,l in enumerate(f):
pass
return i+1
def plot_ReadAccounting(outfilepath,sampleName):
rawRead1=infilepath+sampleName+'_R1.fastq'
rawRead2=infilepath+sampleName+'_R2.fastq'
reads3pTrim=[outfilepath+sampleName+'_R1_3ptrimmed.fastq',outfilepath+sampleName+'_R2_3ptrimmed.fastq']
readsFilter=[outfilepath+sampleName+'_R1_3ptrimmed_filter.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter.fastq']
readsNoDupes=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe.fastq']
readsMappedReapeat=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed']
readsMappedHg19=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
    readsMappedBlacklist=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist.bed'%index_tag]
readsMappedRepeatMask=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag]
clipperIN=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIPPERin.bed'%(threshold,index_tag)
clipperOUT=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_lowFDRreads.bed'%(threshold,index_tag)
fileNames=['Raw (R1)','Raw (R2)','3p Trim (R1)','3p Trim (R2)','Filter (R1)','Filter (R2)','No dupes (R1)','No dupes (R2)','RepeatMapped (R1)','RepeatMapped (R2)','Hg19Mapped (R1)','Hg19Mapped (R2)','Blacklist (R1)','Blacklist (R2)','RepeatMask (R1)','RepeatMask (R2)','ClipperIn','ClipperOut']
filesToCount=[rawRead1,rawRead2,reads3pTrim[0],reads3pTrim[1],readsFilter[0],readsFilter[1],readsNoDupes[0],readsNoDupes[1],readsMappedReapeat[0],readsMappedReapeat[1],readsMappedHg19[0],readsMappedHg19[1],readsMappedBlacklist[0],readsMappedBlacklist[1],readsMappedRepeatMask[0],readsMappedRepeatMask[1],clipperIN,clipperOUT]
counts=[]
counter=0
for fileString in filesToCount:
temp=lineCount(fileString)
if counter < 8:
temp=temp/4 # Fastq files
counts=counts+[temp]
counter += 1
ind = np.arange(len(counts)) + 0.5
plt.barh(ind,list(reversed(np.log10(np.array(counts)))),align='center',color='blue')
plt.xlabel('log10(Counts per file)',fontsize=5)
locs,pltlabels = plt.xticks(fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=5)
plt.yticks(ind,list(reversed(fileNames)),fontsize=5)
plt.tick_params(axis='yticks',labelsize=5)
ax=plt.gca()
for line in ax.get_yticklines():
line.set_markersize(0)
plt.title('Read counts',fontsize=5)
readDF=pd.DataFrame()
readDF['File_name']=fileNames
readDF['Reads_per_file']=counts
outfilepathToSave=outfilepath + '/PlotData_ReadsPerPipeFile'
readDF.to_csv(outfilepathToSave)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
# <codecell>
def plot_BoundGeneTypes(outfilepath,sampleName):
record=pd.DataFrame()
# Exclude specific files (e.g., UTR-specific reads).
geneListToPlot=[f for f in glob.glob(outfilepath+'PlotData_ReadsPerGene_*') if '5pUTR' not in f and '3pUTR' not in f and 'CDS' not in f]
for boundGenes in geneListToPlot:
glist=pd.read_csv(boundGenes,header=None)
glist.columns=['GeneName','Count']
gName=boundGenes.split('_')[-1]
record.loc[gName,'genesBound']=glist.shape[0]
record.loc[gName,'totalReads']=glist['Count'].sum()
record.sort('genesBound',inplace=True)
outfilepathToSave=outfilepath + '/PlotData_ReadAndGeneCountsPerGenetype'
record.to_csv(outfilepathToSave)
ind = np.arange(record.shape[0]) + 0.5
plt.bar(ind,record['genesBound'],align='center',color='blue')
locs,pltlabels = plt.yticks(fontsize=5)
locs,pltlabels = plt.xticks(ind,record.index,fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=5)
plt.tick_params(axis='xticks',labelsize=5)
ax=plt.gca()
for line in ax.get_xticklines():
line.set_markersize(0)
plt.ylabel('Number of genes bound',fontsize=5)
plt.tick_params(axis='yticks',labelsize=5)
plt.title('Bound genes by class',fontsize=5)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
# <codecell>
def plot_ReadsPerCluster(outfilepath,sampleName):
readPerCluster=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters.readsPerCluster'%(threshold,index_tag)
clust=pd.DataFrame(pd.read_table(readPerCluster,header=None))
clust.columns=['ReadsPerCluster']
clust=clust['ReadsPerCluster']
interval=10
bins=range(min(clust)-10,max(clust)+10,interval)
hist,bins=np.histogram(clust,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center, hist,align='center',width=width)
locs,pltlabels = plt.yticks(fontsize=5)
locs,pltlabels = plt.xticks(center,center,fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=3.5)
plt.tick_params(axis='yticks',labelsize=5)
plt.xlabel('Reads per cluster (bin=%s)'%interval,fontsize=5)
plt.ylabel('Frequency (RT stop count)',fontsize=5)
plt.title('Reads per cluster',fontsize=5)
plt.xlim(0,100) # Make the histogram easy to view.
# plt.xlim(-interval,np.max(center)+interval)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
# <codecell>
def plot_ClusterSizes(outfilepath,sampleName):
clipClusters=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters'%(threshold,index_tag)
clust=pd.DataFrame(pd.read_table(clipClusters,header=None,skiprows=1))
clust.columns=['chr','start','end','name','score','strand','m1','m2']
clust['clusterSize']=clust['start']-clust['end']
clust['clusterSize']=clust['clusterSize'].apply(lambda x: math.fabs(x))
plt.boxplot(clust['clusterSize'])
plt.tick_params(axis='x',labelbottom='off')
ax=plt.gca()
for line in ax.get_xticklines():
line.set_markersize(0)
plt.ylabel('Cluster length (bases)',fontsize=5)
locs,pltlabels = plt.yticks(fontsize=5)
plt.title('Cluster size',fontsize=5)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
# <codecell>
def plot_clusterBindingIntensity(outfilepath,sampleName):
clusterCenterHeatmap=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_cleaned_sorted.clusterCenter_heatmap.txt'%(threshold,index_tag)
hmap=pd.DataFrame(pd.read_table(clusterCenterHeatmap,header=None,skiprows=1))
hmap_vals=hmap.ix[:,1:]
sums=hmap_vals.sum(axis=1)
hmap_vals=hmap_vals.loc[np.argsort(sums),:]
plt.ylim(0,hmap_vals.shape[0])
p=plt.pcolormesh(np.array(hmap_vals),cmap='Blues')
plt.tick_params(axis='x',labelbottom='off')
plt.xlabel('Cluster position',fontsize=5)
locs,pltlabels = plt.yticks(fontsize=5)
plt.ylabel('Cluster number',fontsize=5)
plt.title('Read distribution',fontsize=5)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
# <codecell>
def readUTRfile(path):
geneCounts=pd.read_csv(path,header=None)
geneCounts.columns=['Gene_name','Count']
return geneCounts
def plot_readsBymRNAregion(outfilepath,sampleName):
pc_5pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')['Count'].sum()
pc_3pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')['Count'].sum()
pc_CDSReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')['Count'].sum()
non_intronic=pc_5pReads+pc_3pReads+pc_CDSReads
allProteinCoding=outfilepath +'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed'
all_pc=pd.DataFrame(pd.read_table(allProteinCoding,header=None))
pc_allReads=all_pc.shape[0]
v=[float(pc_allReads-non_intronic)/pc_allReads,float(pc_5pReads)/pc_allReads,float(pc_CDSReads)/pc_allReads,float(pc_3pReads)/pc_allReads]
pie_wedges=ax.pie(v,labels=["Intronic","5p UTR","CDS","3pUTR"],labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
wedge.set_edgecolor('black')
wedge.set_lw(1)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
# <codecell>
fig1=plt.figure(1)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
fig1.tight_layout()
fig1.savefig(outfilepath+'Figure1.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig1.savefig(outfilepath+'Figure1.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_mRNAgeneBodyDist(outfilepath,sampleName):
averageGraph=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_averageGraph.txt'
hmap=pd.DataFrame(pd.read_table(averageGraph,header=None,skiprows=1))
hmap=hmap.set_index(0)
avgTrace=hmap.loc['treat',:]
plt.plot(avgTrace,color='blue',linewidth='2')
plt.vlines(200,0,np.max(avgTrace),linestyles='dashed')
plt.vlines(400,0,np.max(avgTrace),linestyles='dashed')
plt.ylim(0,np.max(avgTrace))
plt.tick_params(axis='x',labelbottom='off')
plt.xlabel('mRNA gene body (5pUTR, CDS, 3pUTR)')
plt.ylabel('Read density')
plt.tick_params(axis='y',labelsize=5)
plt.title('CLIP signal across average mRNA transcript.',fontsize=5)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
# <codecell>
def convertENBLids(enst_name):
ensg_name=ensemblGeneAnnot.loc[enst_name,'name2']
return ensg_name
def getUTRbindingProfile(utr,hmap_m):
if utr=='5p':
ix=(hmap_m[range(201,601)].sum(axis=1)==0)&(hmap_m[range(1,201)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')
elif utr=='3p':
ix=(hmap_m[range(1,401)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')
else:
ix=(hmap_m[range(1,201)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)==0)&(hmap_m[range(201,401)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')
# Ensure all genes are also identified in pre-allocated gene lists.
hmap_m_utrSpec=hmap_m.ix[ix,:]
hmap_m_utrSpec_filter=pd.merge(hmap_m_utrSpec,screen,left_on='ENSG_ID',right_on='Gene_name',how='inner')
sums=hmap_m_utrSpec_filter[range(1,601)].sum(axis=1)
hmap_m_utrSpec_filter=hmap_m_utrSpec_filter.loc[np.argsort(sums),:]
return hmap_m_utrSpec_filter
def plot_geneBodyPartition(outfilepath,sampleName):
treatMatrix=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_treatmatrix.txt'
hmap=pd.DataFrame(pd.read_table(treatMatrix,header=None,skiprows=1))
    # Ensure genes recovered from this analysis are independently identified using partitioning of CLIPper cluster data.
hmap['ENSG_ID']=hmap.ix[:,0].apply(convertENBLids)
bound_pc = outfilepath+'clipGenes_proteinCoding'
pc_genes=pd.DataFrame(pd.read_table(bound_pc,header=None,))
pc_genes.columns=['ENSG_ID']
hmap_m=pd.merge(hmap,pc_genes,left_on='ENSG_ID',right_on='ENSG_ID',how='inner')
# Isolate intronic bound genes.
tosave=outfilepath+'PlotData_ExclusiveBound_Intronic'
intronicBoundGenes=list(set(pc_genes['ENSG_ID'])-set(hmap_m['ENSG_ID']))
np.savetxt(tosave,np.array(intronicBoundGenes),fmt="%s")
# UTR specific genes.
geneTypes=['5p','cds','3p']
depth=50
for i in range(0,3):
utrMatrix=getUTRbindingProfile(geneTypes[i],hmap_m)
tosave=outfilepath+'PlotData_ExclusiveBound_%s'%geneTypes[i]
np.savetxt(tosave,utrMatrix['ENSG_ID'],fmt="%s")
plt.subplot2grid((2,3),(1,i),colspan=1)
dataToPlot=utrMatrix[range(1,601)]
p=plt.pcolormesh(np.array(dataToPlot)[-depth:-1,:],cmap='Blues')
plt.title(geneTypes[i],fontsize=5)
plt.vlines(200,0,depth,linestyles='dashed')
plt.vlines(400,0,depth,linestyles='dashed')
plt.tick_params(axis='x',labelbottom='off')
plt.tick_params(axis='y',labelleft='off')
plt.ylim(0,depth)
plt.ylabel('Ranked genes (highest on bottom)',fontsize=5)
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.title('%s specific genes: %s'%(geneTypes[i],np.unique(utrMatrix['ENSG_ID']).shape[0]),fontsize=5)
ensemblGeneAnnot=pd.DataFrame(pd.read_table(genesFile))
ensemblGeneAnnot=ensemblGeneAnnot.set_index('name') # Make ENST the index
plot_geneBodyPartition(outfilepath,sampleName)
# <codecell>
fig2=plt.figure(2)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
plot_geneBodyPartition(outfilepath,sampleName)
fig2.tight_layout()
fig2.savefig(outfilepath+'Figure2.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig2.savefig(outfilepath+'Figure2.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_repeatRNA(outfilepath,sampleName):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repFiles=glob.glob(outfilepath + '/PlotData_RepeatRNAreads_*')
repFiles=[repFile for repFile in repFiles if 'rDNA' not in repFile]
plotDim=math.ceil(math.sqrt(len(repFiles)))
i=0
for path in repFiles:
name=path.split('RepeatRNAreads_')[-1]
try:
# Read in each RT stop file
hits_per_rep=pd.read_csv(path)
RTpositions=hits_per_rep['RT_stop']
start=hits_per_rep.loc[0,'Repeat_Start']
end=hits_per_rep.loc[0,'Repeat_End']
# Histogram of RT stops across gene body
bins=range(start,end+2,1)
hist,bins=np.histogram(RTpositions,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
# Normalize
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
# Subplot
plt.subplot(plotDim,plotDim,i+1)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
plt.xlim(start,end)
# Record data
storageDF=pd.DataFrame()
sequence=repeat_genome_bases[start:end+1]
storageDF['Sequence']=pd.Series(list(sequence))
readsPerBase=np.array(list(hist))
readsPerBaseNorm=np.array(list(histPlot))
storageDF['RT_stops']=readsPerBase
storageDF['RT_stops_norm']=readsPerBaseNorm
outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
storageDF.to_csv(outfilepathToSave)
i+=1
except:
print "No reads for repeatRNA %s"%name
plt.tight_layout()
fig3=plt.figure(3)
plot_repeatRNA(outfilepath,sampleName)
fig3.tight_layout()
fig3.savefig(outfilepath+'Figure3.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig3.savefig(outfilepath+'Figure3.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_rDNA(outfilepath,sampleName):
plt.subplot2grid((3,3),(0,0),colspan=3)
name='rDNA'
rDNA=glob.glob(outfilepath + 'PlotData_RepeatRNAreads_rDNA')
hits_per_rep=pd.read_csv(rDNA[0])
RTpositions=hits_per_rep['RT_stop']
start=hits_per_rep.loc[0,'Repeat_Start']
end=hits_per_rep.loc[0,'Repeat_End']
bins=range(start,end+2,1)
hist,bins=np.histogram(RTpositions,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1]+bins[1:])/2
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
plt.xlim(start,end)
# Record data
storageDF=pd.DataFrame()
sequence=repeat_genome_bases[start:end+1]
storageDF['Sequence']=pd.Series(list(sequence))
readsPerBase=np.array(list(hist))
readsPerBaseNorm=np.array(list(histPlot))
storageDF['RT_stops']=readsPerBase
storageDF['RT_stops_norm']=readsPerBaseNorm
outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
storageDF.to_csv(outfilepathToSave)
# Features of rDNA with respect to start of the bowtie index (index=0)
rRNAstart=start
plt.axvspan(start18s+rRNAstart,end18s+rRNAstart,facecolor='g',alpha=0.5)
plt.axvspan(start5s+rRNAstart,end5s+rRNAstart,facecolor='r',alpha=0.5)
plt.axvspan(start28s+rRNAstart,end28s+rRNAstart,facecolor='b',alpha=0.5)
# Generate histogram for transcribed region
plt.subplot2grid((3,3),(1,0),colspan=3)
datarDNAOnly=RTpositions-start
bins=range((start-start),(end-start+2),1)
hist,bins=np.histogram(datarDNAOnly,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
plt.axvspan(start18s,end18s,facecolor='g',alpha=0.5)
plt.axvspan(start5s,end5s,facecolor='r',alpha=0.5)
plt.axvspan(start28s,end28s,facecolor='b',alpha=0.5)
plt.xlim(0,rRNAend)
# Individual regions
plt.subplot2grid((3,3),(2,0),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='green',alpha=0.75)
plt.xlim(start18s,end18s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('18s Region',fontsize=5)
plt.subplot2grid((3,3),(2,1),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='red',alpha=0.75)
plt.xlim(start5s,end5s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('5.8s Region',fontsize=5)
plt.subplot2grid((3,3),(2,2),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.75)
plt.xlim(start28s,end28s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('28s Region',fontsize=5)
plt.tight_layout()
fig4=plt.figure(4)
plot_rDNA(outfilepath,sampleName)
fig4.tight_layout()
fig4.savefig(outfilepath+'Figure4.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig4.savefig(outfilepath+'Figure4.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getBindingFrac(type_specific):
# 5' position on the negative strand is snoRNA stop coordinate.
neg_data=type_specific[type_specific['strand_snoRNA']=='-']
neg_data['diff']=np.abs(neg_data['Stop_snoRNA']-neg_data['Start'])
neg_data['frac']=neg_data['diff']/(neg_data['Stop_snoRNA']-neg_data['Start_snoRNA'])
# 5' position on the positive strand is snoRNA start coordinate.
pos_data=type_specific[type_specific['strand_snoRNA']=='+']
pos_data['diff']=np.abs(pos_data['Start_snoRNA']-pos_data['Start'])
pos_data['frac']=pos_data['diff']/(pos_data['Stop_snoRNA']-pos_data['Start_snoRNA'])
DF_snoProfile=pd.concat([neg_data,pos_data])
return DF_snoProfile
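# Note: 'frac' expresses each RT stop as a fraction of the snoRNA gene body measured from
# the 5' end (0.0) toward the 3' end (1.0), which is what the binding-profile histograms
# below plot on the x-axis.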
print "snoRNA gene body anaysis."
# logOpen.write("Gene body analysis.\n")
bf_sno=pd.read_table(outfilepath+"clipGenes_snoRNA_LowFDRreads.bed",header=None)
bf_sno.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
snoTypes=pd.DataFrame(bf_sno.groupby('Type').size())
snoTypes.columns=['Reads']
snoTypes['Fraction']=snoTypes['Reads']/snoTypes['Reads'].sum()
outfilepathToSave=outfilepath+'/PlotData_readsPerSnoRNAType'
snoTypes.to_csv(outfilepathToSave)
snoTypesAndGenes=pd.DataFrame(bf_sno.groupby(['Type','name_snoRNA']).size())
snoTypesAndGenes.columns=['Count_per_gene']
outfilepathToSave=outfilepath+'/PlotData_geneStatsPerSnoRNAType'
snoTypesAndGenes.to_csv(outfilepathToSave)
fig5=plt.figure(5)
ax=plt.subplot(2,2,1)
pie_wedges=ax.pie(snoTypes['Fraction'],labels=snoTypes.index,labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
wedge.set_edgecolor('black')
wedge.set_lw(1)
i=2
for sType in set(bf_sno['Type']):
type_specific=bf_sno[bf_sno['Type']==sType]
sno_profile=getBindingFrac(type_specific)
if sType=='C':
title="C/D_box"
elif sType=='H':
title="H/ACA_box"
else:
title="scaRNA"
outfilepathToSave=outfilepath+'/PlotData_snoRNAReadDist_%s'%sType
sno_profile.to_csv(outfilepathToSave)
plt.subplot(2,2,i)
bins=np.arange(0,1,0.01)
hist,bins=np.histogram(sno_profile['frac'],bins=bins)
hist=np.array(hist/float(sno_profile['frac'].shape[0]),dtype=float)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
plt.title('Binding profile for %s'%title,fontsize=5)
plt.xlim([0,1])
# Record data
storageDF=pd.DataFrame()
storageDF['bins']=pd.Series(bins)
storageDF['hist']=pd.Series(hist)
outfilepathToSave=outfilepath+'/PlotData_snoRNAhistogram_%s'%sType
storageDF.to_csv(outfilepathToSave)
i+=1
fig5.tight_layout()
fig5.savefig(outfilepath+'Figure5.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig5.savefig(outfilepath+'Figure5.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getncRNABindingFrac(type_specific):
    # 5' position on the negative strand is the gene end coordinate.
neg_data=type_specific[type_specific['Strand']=='-']
neg_data['diff']=np.abs(neg_data['Gene End (bp)']-neg_data['RT_stop'])
neg_data['frac']=neg_data['diff']/(neg_data['Gene End (bp)']-neg_data['Gene Start (bp)'])
    # 5' position on the positive strand is the gene start coordinate.
pos_data=type_specific[type_specific['Strand']=='+']
pos_data['diff']=np.abs(pos_data['Gene Start (bp)']-pos_data['RT_stop'])
pos_data['frac']=pos_data['diff']/(pos_data['Gene End (bp)']-pos_data['Gene Start (bp)'])
DF_ncRNAProfile=pd.concat([neg_data,pos_data])
return DF_ncRNAProfile
print "ncRNA gene body analysis."
st_stopFiles=glob.glob(outfilepath+"*.geneStartStop")
st_stopFiles=[f for f in st_stopFiles if 'rRNA' not in f]
fig6=plt.figure(6)
plotDim=math.ceil(math.sqrt(len(st_stopFiles)))
i=1
for st_file in st_stopFiles:
name=st_file.split('clipGenes_')[1].split('_LowFDRreads')[0]
tmp=pd.read_csv(st_file)
tmp['RT_stop']=tmp['Start']+expand
tmp_profile=getncRNABindingFrac(tmp)
plt.subplot(plotDim,plotDim,i)
bins=np.arange(0,1,0.01)
hist,bins=np.histogram(tmp_profile['frac'],bins=bins)
hist=np.array(hist/float(tmp_profile['frac'].shape[0]),dtype=float)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
plt.title('Binding profile for %s'%name,fontsize=5)
i+=1
fig6.tight_layout()
fig6.savefig(outfilepath+'Figure6.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig6.savefig(outfilepath+'Figure6.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
logOpen.close()
# <codecell>
| gpl-2.0 |
chrjxj/zipline | tests/test_transforms_talib.py | 12 | 6304 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numpy as np
import pandas as pd
import talib
from datetime import timedelta, datetime
from unittest import TestCase, skip
from zipline.utils.test_utils import setup_logger, teardown_logger
import zipline.utils.factory as factory
from zipline.finance.trading import TradingEnvironment
from zipline.test_algorithms import TALIBAlgorithm
import zipline.transforms.ta as ta
class TestTALIB(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@classmethod
def tearDownClass(cls):
del cls.env
def setUp(self):
setup_logger(self)
sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1990, 3, 30, tzinfo=pytz.utc))
self.source, self.panel = \
factory.create_test_panel_ohlc_source(sim_params, self.env)
def tearDown(self):
teardown_logger(self)
@skip
def test_talib_with_default_params(self):
BLACKLIST = ['make_transform', 'BatchTransform',
# TODO: Figure out why MAVP generates a KeyError
'MAVP']
names = [name for name in dir(ta)
if name[0].isupper() and name not in BLACKLIST]
for name in names:
print(name)
zipline_transform = getattr(ta, name)(sid=0)
talib_fn = getattr(talib.abstract, name)
start = datetime(1990, 1, 1, tzinfo=pytz.utc)
end = start + timedelta(days=zipline_transform.lookback + 10)
sim_params = factory.create_simulation_parameters(
start=start, end=end)
source, panel = \
factory.create_test_panel_ohlc_source(sim_params, self.env)
algo = TALIBAlgorithm(talib=zipline_transform)
algo.run(source)
zipline_result = np.array(
algo.talib_results[zipline_transform][-1])
talib_data = dict()
data = zipline_transform.window
# TODO: Figure out if we are clobbering the tests by this
# protection against empty windows
if not data:
continue
for key in ['open', 'high', 'low', 'volume']:
if key in data:
talib_data[key] = data[key][0].values
talib_data['close'] = data['price'][0].values
expected_result = talib_fn(talib_data)
if isinstance(expected_result, list):
expected_result = np.array([e[-1] for e in expected_result])
else:
expected_result = np.array(expected_result[-1])
if not (np.all(np.isnan(zipline_result)) and
np.all(np.isnan(expected_result))):
self.assertTrue(np.allclose(zipline_result, expected_result))
else:
print('--- NAN')
# reset generator so next iteration has data
# self.source, self.panel = \
# factory.create_test_panel_ohlc_source(self.sim_params)
def test_multiple_talib_with_args(self):
zipline_transforms = [ta.MA(timeperiod=10),
ta.MA(timeperiod=25)]
talib_fn = talib.abstract.MA
algo = TALIBAlgorithm(talib=zipline_transforms, identifiers=[0])
algo.run(self.source)
# Test if computed values match those computed by pandas rolling mean.
sid = 0
talib_values = np.array([x[sid] for x in
algo.talib_results[zipline_transforms[0]]])
np.testing.assert_array_equal(talib_values,
pd.rolling_mean(self.panel[0]['price'],
10).values)
talib_values = np.array([x[sid] for x in
algo.talib_results[zipline_transforms[1]]])
np.testing.assert_array_equal(talib_values,
pd.rolling_mean(self.panel[0]['price'],
25).values)
for t in zipline_transforms:
talib_result = np.array(algo.talib_results[t][-1])
talib_data = dict()
data = t.window
# TODO: Figure out if we are clobbering the tests by this
# protection against empty windows
if not data:
continue
for key in ['open', 'high', 'low', 'volume']:
if key in data:
talib_data[key] = data[key][0].values
talib_data['close'] = data['price'][0].values
expected_result = talib_fn(talib_data, **t.call_kwargs)[-1]
np.testing.assert_allclose(talib_result, expected_result)
def test_talib_with_minute_data(self):
ma_one_day_minutes = ta.MA(timeperiod=10, bars='minute')
# Assert that the BatchTransform window length is enough to cover
# the amount of minutes in the timeperiod.
# Here, 10 minutes only needs a window length of 1.
self.assertEquals(1, ma_one_day_minutes.window_length)
        # With more minutes than the 390 in a single trading day, we should
        # have a window_length of two days.
ma_two_day_minutes = ta.MA(timeperiod=490, bars='minute')
self.assertEquals(2, ma_two_day_minutes.window_length)
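        # Sanity arithmetic for the two assertions above (one trading day has
        # 390 minutes): ceil(10 / 390) = 1 day of lookback, while
        # ceil(490 / 390) = 2 days, hence window lengths of 1 and 2.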
# TODO: Ensure that the lookback into the datapanel is returning
# expected results.
# Requires supplying minute instead of day data to the unit test.
# When adding test data, should add more minute events than the
# timeperiod to ensure that lookback is behaving properly.
| apache-2.0 |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/filter/filter.py | 1 | 8505 | from joblib import delayed, Parallel
import pandas as pd
import pyprind
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
class Filter(object):
"""Filter base class.
"""
def __init__(self, allow_missing=False):
self.allow_missing = allow_missing
def filter_candset(self, candset,
candset_l_key_attr, candset_r_key_attr,
ltable, rtable,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
n_jobs=1, show_progress=True):
"""Finds candidate matching pairs of strings from the input candidate
set.
Args:
candset (DataFrame): input candidate set.
candset_l_key_attr (string): attribute in candidate set which is a
key in left table.
candset_r_key_attr (string): attribute in candidate set which is a
key in right table.
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_filter_attr (string): attribute in left table on which the filter
should be applied.
r_filter_attr (string): attribute in right table on which the filter
should be applied.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is
given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used (where n_cpus is the total
number of CPUs in the machine). Thus for n_jobs = -2, all CPUs
but one are used. If (n_cpus + 1 + n_jobs) becomes less than 1,
then no parallel computing code will be used (i.e., equivalent
to the default).
show_progress (boolean): flag to indicate whether task progress
should be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs from the candidate set that
survive the filter (DataFrame).
"""
# check if the input candset is a dataframe
validate_input_table(candset, 'candset')
# check if the candset key attributes exist
validate_attr(candset_l_key_attr, candset.columns,
'left key attribute', 'candset')
validate_attr(candset_r_key_attr, candset.columns,
'right key attribute', 'candset')
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
        # check if the key attributes and filter attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_filter_attr, ltable.columns,
'filter attribute', 'left table')
validate_attr(r_filter_attr, rtable.columns,
'filter attribute', 'right table')
# check if the filter attributes are not of numeric type
validate_attr_type(l_filter_attr, ltable[l_filter_attr].dtype,
'filter attribute', 'left table')
validate_attr_type(r_filter_attr, rtable[r_filter_attr].dtype,
'filter attribute', 'right table')
# check if the key attributes are unique and do not contain
# missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# check for empty candset
if candset.empty:
return candset
# Do a projection on the input dataframes to keep only required
# attributes. Note that this does not create a copy of the dataframes.
# It only creates a view on original dataframes.
ltable_projected = ltable[[l_key_attr, l_filter_attr]]
rtable_projected = rtable[[r_key_attr, r_filter_attr]]
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(candset))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = _filter_candset_split(candset,
candset_l_key_attr, candset_r_key_attr,
ltable_projected, rtable_projected,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
self, show_progress)
else:
# if n_jobs is above 1, split the candset into n_jobs splits and
# filter each candset split in a separate process.
candset_splits = split_table(candset, n_jobs)
results = Parallel(n_jobs=n_jobs)(delayed(_filter_candset_split)(
candset_splits[job_index],
candset_l_key_attr, candset_r_key_attr,
ltable_projected, rtable_projected,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
self,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
return output_table
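# Illustrative usage (a sketch; 'my_filter' and the attribute names below are
# placeholders): a concrete subclass implementing filter_pair(), such as an
# overlap filter, would typically be applied to a candidate set as
#
#     surviving_pairs = my_filter.filter_candset(
#         candset, 'l_id', 'r_id', ltable, rtable,
#         'id', 'id', 'name', 'name', n_jobs=-1)
#
# With n_jobs=-1 all CPUs are used; with n_jobs=-2 all CPUs but one are used,
# following the (n_cpus + 1 + n_jobs) rule described in the docstring above.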
def _filter_candset_split(candset,
candset_l_key_attr, candset_r_key_attr,
ltable, rtable,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
filter_object, show_progress):
# Find column indices of key attr and filter attr in ltable
l_columns = list(ltable.columns.values)
l_key_attr_index = l_columns.index(l_key_attr)
l_filter_attr_index = l_columns.index(l_filter_attr)
# Find column indices of key attr and filter attr in rtable
r_columns = list(rtable.columns.values)
r_key_attr_index = r_columns.index(r_key_attr)
r_filter_attr_index = r_columns.index(r_filter_attr)
# Build a dictionary on ltable
ltable_dict = build_dict_from_table(ltable, l_key_attr_index,
l_filter_attr_index,
remove_null=False)
# Build a dictionary on rtable
rtable_dict = build_dict_from_table(rtable, r_key_attr_index,
r_filter_attr_index,
remove_null=False)
# Find indices of l_key_attr and r_key_attr in candset
candset_columns = list(candset.columns.values)
candset_l_key_attr_index = candset_columns.index(candset_l_key_attr)
candset_r_key_attr_index = candset_columns.index(candset_r_key_attr)
valid_rows = []
if show_progress:
prog_bar = pyprind.ProgBar(len(candset))
for candset_row in candset.itertuples(index = False):
l_id = candset_row[candset_l_key_attr_index]
r_id = candset_row[candset_r_key_attr_index]
l_row = ltable_dict[l_id]
r_row = rtable_dict[r_id]
valid_rows.append(not filter_object.filter_pair(
l_row[l_filter_attr_index],
r_row[r_filter_attr_index]))
if show_progress:
prog_bar.update()
return candset[valid_rows]
| bsd-3-clause |
JVillella/tensorflow | tensorflow/examples/learn/iris.py | 29 | 2313 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
xwolf12/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various Bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
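# Model summary (sketch of the standard formulation used here): both estimators
# in this module assume y ~ N(X w, 1 / alpha) with a Gaussian prior on the
# weights, w ~ N(0, 1 / lambda). BayesianRidge (above) shares a single scalar
# precision lambda across all weights, while ARDRegression (below) keeps one
# precision lambda_i per feature, which is what allows it to prune irrelevant
# features once lambda_i exceeds threshold_lambda.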
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values (real numbers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/linear_model/tests/test_huber.py | 25 | 6981 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
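# Background note (sketch): HuberRegressor optimises a loss that is quadratic
# for small scaled residuals and linear for large ones, roughly
#     L(z) = z**2                             if |z| <= epsilon
#     L(z) = 2 * epsilon * |z| - epsilon**2   otherwise
# (plus the alpha * ||w||^2 ridge penalty handled in _huber_loss_and_gradient),
# so outlying samples only contribute linearly and the tests below expect the
# fitted coefficients to be robust to them.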
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
    # Replace 10% of the samples with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_, huber_coef)
assert_array_almost_equal(huber.intercept_, huber_intercept)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber.coef_, huber_coef, 3)
assert_array_almost_equal(huber.intercept_, huber_intercept, 3)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber_sparse.coef_, huber_coef, 3)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
    """Test that they converge to the same coefficients for the same parameters"""
X, y = make_regression_with_outliers(n_samples=5, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=1000000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
# And as said above, the first iteration seems to be run anyway.
if huber_warm.n_iter_ is not None:
assert_equal(1, huber_warm.n_iter_)
def test_huber_better_r2_score():
    # Test that HuberRegressor gives a better r2 score than Ridge on the non-outliers.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
RPGOne/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
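# A consensus score of 1.0 means the biclusters found by the model match the
# ground-truth rows/columns perfectly; lower values indicate partial recovery.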
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
sequana/sequana | sequana/scripts/main.py | 1 | 26623 | #-*- coding: utf-8 -*-
import sys
import os
import glob
import click
#import click_completion
#click_completion.init()
from sequana import version
import functools
__all__ = ["main"]
import sequana
import colorlog
logger = colorlog.getLogger(__name__)
# This can be used by all commands as a simple decorator
def common_logger(func):
@click.option("--logger", default="INFO",
type=click.Choice(["INFO", "DEBUG", "WARNING", "CRITICAL", "ERROR"]))
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def get_env_vars(ctx, args, incomplete):
return [k for k in os.environ.keys() if incomplete in k]
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
import pkg_resources
pipelines = [item.key for item in pkg_resources.working_set if item.key.startswith("sequana")]
if len(pipelines):
version +="\nThe following pipelines are installed:\n"
for item in pkg_resources.working_set:
if item.key.startswith("sequana") and item.key != 'sequana':
version += "\n - {} version: {}".format(item.key, item.version)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=version)
def main(**kwargs):
"""\bThis is the main entry point for a set of Sequana applications.
Pipelines such as sequana_rnaseq, sequana_variant_calling have their own
application and help.
In addition, more advanced tools such as sequana_taxonomy or
sequana_coverage have their own standalone.
"""
pass
@main.command()
@click.argument('filename', type=click.STRING, nargs=-1)
@click.option("-o", "--output",
    help="filename where to save results; to be used with --head or --tail")
@click.option("--count-reads", is_flag=True)
@click.option("--head", type=click.INT,
help='number of reads to extract from the head')
@click.option("--merge", is_flag=True)
@click.option("--tail", type=click.INT,
help="number of reads to extract from the tail")
def fastq(**kwargs):
"""Set of useful utilities for FastQ manipulation.
    Input files can be gzipped or not. Use --output to specify where results
    should be saved (required with --head and --merge).
"""
from sequana.fastq import FastQ
filenames = kwargs['filename']
# users may provide a wildcards such as "A*gz" or list of files.
if len(filenames) == 1:
# if existing files or glob, a glob would give the same answer.
filenames = glob.glob(filenames[0])
for filename in filenames:
os.path.exists(filename)
# could be simplified calling count_reads only once
if kwargs['count_reads']:
for filename in filenames:
f = FastQ(filename)
Nreads = f.count_reads()
Nlines = Nreads * 4
print(f"Number of reads in {filename}: {Nreads}")
print(f"Number of lines in {filename}: {Nlines}")
elif kwargs['head']:
for filename in filenames:
f = FastQ(filename)
if kwargs['output'] is None:
logger.error("Please use --output to tell us where to save the results")
sys.exit(1)
N = kwargs['head'] * 4
f.extract_head(N=N, output_filename=kwargs['output'])
elif kwargs['tail']: #pragma: no cover
raise NotImplementedError
elif kwargs['merge']:
import subprocess
# merge all input files (assuming gz extension)
extensions = [filename.split(".")[-1] for filename in filenames]
if set(extensions) != set(['gz']):
raise ValueError("Your input FastQ files must be zipped")
output_filename = kwargs['output']
if output_filename is None:
logger.error("You must use --output filename.gz")
sys.exit(1)
if output_filename.endswith(".gz") is False:
raise ValueError("your output file must end in .gz")
p1 = subprocess.Popen(['zcat'] + list(filenames), stdout=subprocess.PIPE)
fout = open(output_filename, 'wb')
p2 = subprocess.run(['pigz'], stdin=p1.stdout, stdout=fout)
else: #pragma: no cover
        print("Please use one of the options: --count-reads, --head, --tail or --merge")
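# Illustrative invocations of the 'fastq' command defined above (file names are
# placeholders):
#
#     sequana fastq R1.fastq.gz --count-reads
#     sequana fastq R1.fastq.gz --head 1000 --output subset.fastq.gz
#     sequana fastq R1.fastq.gz R2.fastq.gz --merge --output merged.fastq.gz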
@main.command()
@click.argument('name', type=click.STRING)
@click.option('--check', is_flag=True)
@click.option('--extract-adapters', is_flag=True)
@click.option('--quick-fix', is_flag=True)
@click.option('--output', default=None)
def samplesheet(**kwargs):
    """Utilities to manipulate sample sheets"""
name = kwargs['name']
from sequana.iem import IEM
if kwargs['check']:
iem = IEM(name)
iem.validate()
logger.info("SampleSheet looks correct")
elif kwargs["extract_adapters"]:
iem = IEM(name)
iem.to_fasta()
elif kwargs["quick_fix"]:
iem = IEM(name, tryme=True)
if kwargs['output']:
filename = kwargs['output']
else:
filename = name + ".fixed"
logger.info("Saving fixed version in {}".format(filename))
iem.quick_fix(output_filename=filename)
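# Illustrative invocations of the 'samplesheet' command defined above (file
# names are placeholders):
#
#     sequana samplesheet SampleSheet.csv --check
#     sequana samplesheet SampleSheet.csv --extract-adapters
#     sequana samplesheet SampleSheet.csv --quick-fix --output SampleSheet.fixed.csv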
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=-1)
@click.option("--module",
required=False,
type=click.Choice(["bamqc", "bam", "fasta", "fastq", "gff"]))
def summary(**kwargs):
    """Create an HTML report for various types of NGS formats.
    \b
    * bamqc
    * bam
    * fasta
    * fastq
    * gff
    This will process all files in the given pattern (in back quotes)
    sequentially and produce one HTML file per input file.
    Other modules all work in the same way. For example, for FastQ files::
sequana summary one_input.fastq
sequana summary `ls *fastq`
"""
names = kwargs['name']
module = kwargs['module']
if module is None:
if names[0].endswith('fastq.gz') or names[0].endswith('.fastq'):
module = "fastq"
elif names[0].endswith('.bam'):
module = "bam"
elif names[0].endswith('.gff') or names[0].endswith('gff3'):
module = "gff"
elif names[0].endswith('fasta.gz') or names[0].endswith('.fasta'):
module = "fasta"
else:
            logger.error("please use --module to tell us about the input files")
sys.exit(1)
if module == "bamqc":
for name in names:
print(f"Processing {name}")
from sequana.modules_report.bamqc import BAMQCModule
report = BAMQCModule(name, "bamqc.html")
    elif module == "fasta": # there is no module per se. Here we just call FastA.summary()
from sequana.fasta import FastA
for name in names:
f = FastA(name)
f.summary()
    elif module == "fastq": # there is no module per se. Here we just use FastQC to compute stats
from sequana.fastq import FastQ
from sequana import FastQC
for filename in names:
ff = FastQC(filename, max_sample=1e6, verbose=False)
stats = ff.get_stats()
print(stats)
elif module == "bam":
import pandas as pd
from sequana import BAM
for filename in names:
ff = BAM(filename)
stats = ff.get_stats()
df = pd.Series(stats).to_frame().T
print(df)
elif module == "gff":
import pandas as pd
from sequana import GFF3
for filename in names:
ff = GFF3(filename)
print("#filename: {}".format(filename))
print("#Number of entries per genetic type:")
print(ff.df.value_counts('type').to_string())
print("#Number of duplicated attribute (if any) per attribute:")
ff.get_duplicated_attributes_per_type()
@main.command()
@click.option("--file1", type=click.Path(),
default=None, required=True,
help="""The first input RNA-seq table to compare""")
@click.option("--file2", type=click.Path(),
default=None, required=True,
help="""The second input RNA-seq table to compare""")
@common_logger
def rnaseq_compare(**kwargs):
"""Compare 2 tables created by the 'sequana rnadiff' command"""
from sequana.compare import RNADiffCompare
c = RNADiffCompare(kwargs['file1'], kwargs['file2'])
c.plot_volcano_differences()
from pylab import savefig
savefig("sequana_rnaseq_compare_volcano.png", dpi=200)
@main.command()
@click.option("--annotation", type=click.Path(),
default=None,
help="""The annotation GFF file used to perform the feature count""")
@click.option("--report-only",
is_flag=True,
default=False,
help="""Generate report assuming results are already present""")
@click.option("--output-directory", type=click.Path(),
default="rnadiff",
help="""Output directory where are saved the results""")
@click.option("--features", type=click.Path(),
default="all_features.out",
    help="""The counts from the feature counting step. This should be the output of the
    sequana_rnaseq pipeline (all_features.out).""")
#FIXME I think it would be better to have a single file with multiple columns
#for alternative condition (specified using the "condition" option)
@click.option("--design", type=click.Path(),
default="design.csv", help="""It should have been generated by sequana_rnaseq. If
    not, it must be a comma-separated file with two columns: one for the label to be
    found in the --features file and one for the condition to which it
    belongs. E.g. with 3 replicates and 2 conditions, it should look like:
\b
label,condition
WT1,WT
WT2,WT
WT3,WT
file1,cond1
fileother,cond1
""")
@click.option("--condition", type=str,
default="condition", help="""The name of the column in design.csv to use as condition
for the differential analysis. Default is 'condition'""")
@click.option("--feature-name",
default="gene",
help="""The feature name compatible with your GFF. Default is 'gene'""")
@click.option("--attribute-name",
default="ID",
help="""The attribute used as identifier. compatible with your GFF. Default is 'ID'""")
@click.option("--reference", type=click.Path(),
default=None,
help="""The reference to test DGE against. If provided, conditions not
involving the reference are ignored. Otherwise all combinations are
tested""")
@click.option("--comparisons", type=click.Path(),
default=None,
help="""Not yet implemented. By default, all comparisons are computed""")
@click.option("--cooks-cutoff", type=click.Path(),
default=None,
help="""if none, let DESeq2 choose the cutoff""")
@click.option("--independent-filtering/--no-independent-filtering",
default=False,
    help="""Do not perform independent filtering by default. Low counts may not
    have adjusted p-values otherwise""")
@click.option("--beta-prior/--no-beta-prior",
default=False,
    help="Use beta prior or not. Default is no beta prior")
@click.option("--fit-type",
default="parametric",
help="DESeq2 type of fit. Default is 'parametric'")
@common_logger
def rnadiff(**kwargs):
"""Perform RNA-seq differential analysis.
This command performs the differential analysis of gene expression. The
analysis is performed on feature counts generated by a RNA-seq analysis
(see e.g. https://github.com/sequana/rnaseq pipeline). The analysis is
performed by DESeq2. A HTML report is created as well as a set of output
files, including summary table of the analysis.
To perform this analysis, you will need the GFF file used during the RNA-seq
analysis, the feature stored altogether in a single file, an experimental
design file, and the feature and attribute used during the feature count.
Here is an example:
\b
sequana rnadiff --annotation Lepto.gff
--design design.csv --features all_features.out
--feature-name gene --attribute-name ID
"""
import pandas as pd
from sequana.featurecounts import FeatureCount
from sequana.rnadiff import RNADiffAnalysis, RNADesign
from sequana.modules_report.rnadiff import RNAdiffModule
logger.setLevel(kwargs['logger'])
outdir = kwargs['output_directory']
feature = kwargs['feature_name']
attribute = kwargs['attribute_name']
design = kwargs['design']
reference=kwargs['reference']
if kwargs['annotation']:
gff = kwargs['annotation']
logger.info(f"Checking annotation file")
from sequana import GFF3
g = GFF3(gff) #.save_annotation_to_csv()
if feature not in g.features:
logger.critical(f"{feature} not found in the GFF. Most probably a wrong feature name")
attributes = g.get_attributes(feature)
if attribute not in attributes:
            logger.critical(f"{attribute} not found in the GFF for the provided feature. Most probably a wrong attribute name. Please change the --attribute-name option or do not provide any GFF")
sys.exit(1)
else:
gff = None
design_check = RNADesign(design, reference=reference)
compa_csv = kwargs['comparisons']
if compa_csv:
compa_df = pd.read_csv(compa_csv)
comparisons = list(zip(compa_df["alternative"], compa_df["reference"]))
else:
comparisons = design_check.comparisons
if kwargs['report_only'] is False:
logger.info(f"Processing features counts and saving into {outdir}/light_counts.csv")
fc = FeatureCount(kwargs['features'])
from easydev import mkdirs
mkdirs(f"{outdir}")
fc.rnadiff_df.to_csv(f"{outdir}/light_counts.csv")
logger.info(f"Differential analysis to be saved into ./{outdir}")
for k in sorted(["independent_filtering", "beta_prior",
"cooks_cutoff", "fit_type", "reference"]):
logger.info(f" Parameter {k} set to : {kwargs[k]}")
r = RNADiffAnalysis(f"{outdir}/light_counts.csv", design,
condition=kwargs["condition"],
comparisons=comparisons,
fc_feature=feature,
fc_attribute=attribute,
outdir=outdir,
gff=gff,
cooks_cutoff=kwargs.get("cooks_cutoff"),
independent_filtering=kwargs.get("independent_filtering"),
beta_prior=kwargs.get("beta_prior"),
fit_type=kwargs.get('fit_type')
)
logger.info(f"Saving output files into {outdir}/rnadiff.csv")
try:
results = r.run()
results.to_csv(f"{outdir}/rnadiff.csv")
except Exception as err:
logger.error(err)
sys.exit(1)
else:
logger.info(f"DGE done.")
        # cleanup if successful
os.remove(f"{outdir}/rnadiff.err")
os.remove(f"{outdir}/rnadiff.out")
os.remove(f"{outdir}/rnadiff_light.R")
logger.info(f"Reporting. Saving in rnadiff.html")
report = RNAdiffModule(outdir, kwargs['design'], gff=gff,
fc_attribute=attribute,
fc_feature=feature,
alpha=0.05,
log2_fc=0,
condition=kwargs["condition"],
annot_cols=None,
pattern="*vs*_degs_DESeq2.csv")
@main.command()
@click.option("--mart", default="ENSEMBL_MART_ENSEMBL",
show_default=True,
help="A valid mart name")
@click.option("--dataset", required=True,
help="A valid dataset name. e.g. mmusculus_gene_ensembl, hsapiens_gene_ensembl")
@click.option("--attributes", multiple=True,
default=["ensembl_gene_id","go_id","entrezgene_id","external_gene_name"],
show_default=True,
help="A list of valid attributes to look for in the dataset")
@click.option("--output", default=None,
help="""by default save results into a CSV file named
biomart_<dataset>_<YEAR>_<MONTH>_<DAY>.csv""")
@common_logger
def biomart(**kwargs):
"""Retrieve information from biomart and save into CSV file
This command uses BioMart from BioServices to introspect a MART service
    (--mart) and a specific dataset (--dataset, e.g. mmusculus_gene_ensembl). Then,
    for all ensembl IDs, it will fetch the requested attributes (--attributes).
    Finally, it saves the results into a CSV file (--output). This takes
about 5-10 minutes to retrieve the data depending on the connection.
"""
print(kwargs)
logger.setLevel(kwargs["logger"])
mart = kwargs['mart']
attributes = kwargs['attributes']
dataset = kwargs["dataset"]
from sequana.enrichment import Mart
conv = Mart(dataset, mart)
df = conv.query(attributes)
conv.save(df, filename=kwargs['output'])
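# Illustrative invocation of the 'biomart' command defined above (the dataset
# name and output file are examples; --attributes may be repeated):
#
#     sequana biomart --dataset mmusculus_gene_ensembl \
#         --attributes ensembl_gene_id --attributes go_id \
#         --output biomart_mmusculus.csv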
@main.command()
@click.option("-i", "--input", required=True,
help="The salmon input file.")
@click.option("-o", "--output", required=True,
help="The feature counts output file")
@click.option("-f", "--gff", required=True,
help="A GFF file compatible with your salmon file")
@click.option("-a", "--attribute", default="ID",
help="A valid attribute to be found in the GFF file and salmon input")
@click.option("-a", "--feature", default="gene",
help="A valid feature")
def salmon(**kwargs):
    """Convert the output of Salmon into a feature counts file."""
from sequana import salmon
salmon_input = kwargs['input']
output = kwargs["output"]
if os.path.exists(salmon_input) is False:
logger.critical("Input file does not exists ({})".format(salmon_input))
gff = kwargs["gff"]
attribute = kwargs['attribute']
feature = kwargs['feature']
# reads file generated by salmon and generated count file as expected by
# DGE.
s = salmon.Salmon(salmon_input, gff)
s.save_feature_counts(output, feature=feature, attribute=attribute)
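# Illustrative invocation of the 'salmon' command defined above (file names are
# placeholders; long options shown for clarity):
#
#     sequana salmon --input quant.sf --output feature_counts.out \
#         --gff annotation.gff --attribute ID --feature gene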
@main.command()
@click.option("-i", "--input", required=True)
@click.option("-o", "--output", required=True)
def gtf_fixer(**kwargs):
    """Read a GTF file and fix known issues (exon and gene uniqueness)"""
from sequana.gtf import GTFFixer
gtf = GTFFixer(kwargs['input'])
res = gtf.fix_exons_uniqueness(kwargs['output'])
#res = gtf.fix_exons_uniqueness(kwargs['output'])
print(res)
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True),
nargs=1)
@click.option("--annotation-attribute", type=click.STRING,
#required=True,
default="Name",
    help="attribute in the annotation used to identify genes (default: Name)")
@click.option("--panther-taxon", type=click.INT,
#required=True,
default=0,
    help="a valid taxon identifier")
@click.option("--kegg-name", type=click.STRING,
default=None,
help="a valid KEGG name (automatically filled for 9606 (human) and 10090 (mmusculus)")
@click.option("--log2-foldchange-cutoff", type=click.FLOAT,
default=1,
show_default=True,
help="remove events with absolute log2 fold change below this value")
@click.option("--padj-cutoff", type=click.FLOAT,
default=0.05,
show_default=True,
    help="remove events with adjusted p-value above this value (default: 0.05).")
@click.option("--biomart", type=click.STRING,
default=None,
    help="""you may need a biomart mapping of your identifiers for the KEGG
pathways analysis. If you do not have this file, you can use the 'sequana biomart'
command""")
@click.option("--go-only", type=click.BOOL,
default=False,
is_flag=True,
help="""to run only panther db enrichment""")
@click.option("--plot-linearx", type=click.BOOL,
default=False,
is_flag=True,
    help="""Default is log2 fold enrichment in the plots. Use this flag for a linear scale""")
@click.option("--compute-levels", type=click.BOOL,
default=False,
is_flag=True,
help="""to compute the GO levels (slow) in the plots""")
@click.option("--max-genes", type=click.INT,
default=2000,
help="""Maximum number of genes (up or down) to use in PantherDB, which is limited to about 3000""")
@click.option("--kegg-only", type=click.BOOL,
default=False,
is_flag=True,
    help="""to run only the KEGG pathways enrichment""")
@click.option("--kegg-pathways-directory", type=click.Path(),
default=None,
help="""a place where to find the pathways for each organism""")
@click.option("--kegg-background", type=click.INT,
default=None,
help="""a background for kegg enrichment. If None, set to number of genes found in KEGG""")
@common_logger
def enrichment(**kwargs):
    """Create an HTML report for various sequana outputs.
\b
* enrichment: the output of RNADiff pipeline
Example for the enrichment module:
sequana enrichment rnadiff.csv --panther-taxon 10090
--log2-foldchange-cutoff 2 --kegg-only
The KEGG pathways are loaded and it may take time. Once done, they are saved
in kegg_pathways/organism and be loaded next time:
sequana enrichment rnadiff/rnadiff.csv
--panther-taxon 189518 \
--log2-foldchange-cutoff 2 --kegg-only \
--kegg-name lbi\
--annotation file.gff
"""
import pandas as pd
from sequana.modules_report.enrichment import Enrichment
logger.setLevel(kwargs['logger'])
taxon = kwargs['panther_taxon']
if taxon == 0:
logger.error("You must provide a taxon with --panther-taxon")
return
keggname = kwargs['kegg_name']
params = {"padj": kwargs['padj_cutoff'],
"log2_fc": kwargs['log2_foldchange_cutoff'],
"max_entries": kwargs['max_genes'],
"mapper": kwargs['biomart'],
"kegg_background": kwargs['kegg_background'],
"preload_directory": kwargs['kegg_pathways_directory'],
"plot_logx": not kwargs['plot_linearx'],
"plot_compute_levels": kwargs['compute_levels'],
}
filename = kwargs['biomart']
    if filename and not os.path.exists(filename):
        logger.error("{} does not exist".format(filename))
sys.exit(1)
filename = kwargs['kegg_pathways_directory']
    if filename and not os.path.exists(filename):
        logger.error("{} does not exist".format(filename))
sys.exit(1)
rnadiff_file = kwargs['name']
logger.info(f"Reading {rnadiff_file}")
rnadiff = pd.read_csv(rnadiff_file, index_col=0, header=[0,1])
# now that we have loaded all results from a rnadiff analysis, let us
# perform the enrichment for each comparison found in the file
annot_col = kwargs['annotation_attribute']
Nmax = kwargs['max_genes']
from sequana.utils import config
for compa in rnadiff.columns.levels[0]:
if compa not in ['statistics', 'annotation']:
# get gene list
df = rnadiff[compa].copy()
# we add the annotation
for x in rnadiff['annotation'].columns:
df[x] = rnadiff['annotation'][x]
# now we find the gene lists
padj = params['padj']
log2fc = params['log2_fc']
df = df.query("(log2FoldChange >=@log2fc or log2FoldChange<=-@log2fc) and padj <= @padj")
df.reset_index(inplace=True)
dfup = df.sort_values("log2FoldChange", ascending=False)
up_genes = list(dfup.query("log2FoldChange > 0")[annot_col])[:Nmax]
dfdown = df.sort_values("log2FoldChange", ascending=True)
down_genes = list(dfdown.query("log2FoldChange < 0")[annot_col])[:Nmax]
all_genes = list(
df.sort_values("log2FoldChange", key=abs,ascending=False)[annot_col]
)[:Nmax]
gene_dict = {
"up": up_genes,
"down": down_genes,
"all": all_genes,
}
Nup = len(up_genes)
Ndown = len(down_genes)
N = Nup + Ndown
logger.info(f"Computing enrichment for the {compa} case")
logger.info(f"Found {Nup} genes up-regulated, {Ndown} down regulated ({N} in total).")
config.output_dir = f"enrichment/{compa}"
            os.makedirs("enrichment", exist_ok=True)
report = Enrichment(gene_dict, taxon, df,
kegg_organism=keggname,
enrichment_params=params,
go_only=kwargs["go_only"],
kegg_only=kwargs["kegg_only"],
command=" ".join(['sequana'] + sys.argv[1:]))
@main.command()
@click.option("--search-kegg", type=click.Path(),
default=None,
help="""Search a pattern amongst all KEGG organism""")
@click.option("--search-panther", type=click.Path(),
default=None,
help="""Search a pattern amongst all KEGG organism""")
@common_logger
def taxonomy(**kwargs):
"""Tool to retrieve taxonomic information.
sequana taxonomy --search-kegg leptospira
"""
if kwargs['search_kegg']:
from sequana.kegg import KEGGHelper
k = KEGGHelper()
results = k.search(kwargs['search_kegg'].lower())
print(results)
elif kwargs['search_panther']:
import pandas as pd
from sequana import sequana_data
df = pd.read_csv(sequana_data("panther.csv"), index_col=0)
pattern = kwargs['search_panther']
f1 = df[[True if pattern in x else False for x in df['name']]]
f2 = df[[True if pattern in x else False for x in df.short_name]]
f3 = df[[True if pattern in x else False for x in df.long_name]]
indices = list(f1.index) + list(f2.index) + list(f3.index)
if len(indices) == 0:
# maybe it is a taxon ID ?
f4 = df[[True if pattern in str(x) else False for x in df.taxon_id]]
indices = list(f4.index)
indices = set(indices)
print(df.loc[indices])
@main.command()
@click.argument("gff_filename", type=click.Path(exists=True))
@common_logger
def gff2gtf(**kwargs):
"""Convert a GFF file into GTF
    This is an experimental conversion. Use with care.
"""
filename = kwargs["gff_filename"]
assert filename.endswith(".gff") or filename.endswith(".gff3")
from sequana.gff3 import GFF3
g = GFF3(filename)
if filename.endswith(".gff"):
g.to_gtf(os.path.basename(filename).replace(".gff", ".gtf"))
elif filename.endswith(".gff3"):
g.to_gtf(os.path.basename(filename).replace(".gff3", ".gtf"))
| bsd-3-clause |
calispac/digicampipe | digicampipe/image/disp.py | 1 | 9792 | import matplotlib.pyplot as plt
import numpy as np
from tqdm import trange
from digicampipe.instrument.camera import Camera
def disp_eval(parameters, width, length, cog_x, cog_y,
x_offset, y_offset, psi, skewness,
size, leakage2, method):
parameter_values = parameters.valuesdict()
# (Lessard et al., 2001)
if method == 1:
disp_comp = parameter_values['A0'] * (1 - width / length)
    # eq 2.10 in Lopez Coto, VHE gamma-ray observations of pulsar
    # wind nebulae ..., but a better reference is: (Domingo-Santamaria+, 2005)
elif method == 2:
A = (parameter_values['A0'] + parameter_values['A1'] * np.log10(size)
+ parameter_values['A2'] * np.log10(size) ** 2)
B = (parameter_values['A3'] + parameter_values['A4'] * np.log10(size)
+ parameter_values['A5'] * np.log10(size) ** 2)
eta = (parameter_values['A6'] + parameter_values['A7'] * np.log10(size)
+ parameter_values['A8'] * np.log10(size) ** 2)
disp_comp = A + B * (width / (length + eta * leakage2))
# just some test
elif method == 3:
disp_comp = parameter_values['A0'] + \
parameter_values['A1'] * length / width
# Kranich and Stark, ICRC 2003
elif method == 4:
disp_comp = (parameter_values['A0']
* (1 - width / (length *
(1 + parameter_values['A1'] * leakage2))))
# (Luke Riley St Marie 2014) <-- simplified Domingo-Santamaria
elif method == 5:
fraction = width / (length + parameter_values['A2']
* leakage2 * np.log10(size))
disp_comp = (np.log10(size)
* (parameter_values['A0'] + parameter_values['A1'] *
(1 - fraction)))
x_source_comp0 = cog_x + disp_comp * np.cos(psi) # Two possible solutions
y_source_comp0 = cog_y + disp_comp * np.sin(psi) #
x_source_comp1 = cog_x - disp_comp * np.cos(psi) #
y_source_comp1 = cog_y - disp_comp * np.sin(psi) #
# Selection of one specific solution according to skewness
# - head of the shower (close to the source) gives more signal
# - if skewness > 0, head is closer to the center of FOV than tail
x_source_comp = np.zeros(disp_comp.shape)
y_source_comp = np.zeros(disp_comp.shape)
skewness_is_positive = skewness > 0
x_source_comp[skewness_is_positive] = x_source_comp1[skewness_is_positive]
x_source_comp[~skewness_is_positive] = x_source_comp0[
~skewness_is_positive]
y_source_comp[skewness_is_positive] = y_source_comp1[skewness_is_positive]
y_source_comp[~skewness_is_positive] = y_source_comp0[
~skewness_is_positive]
residuals = np.array([x_offset - x_source_comp, y_offset - y_source_comp])
# Multi-dimensional minimization needs residuals as
# a simple 1D vector, thus the .flatten() is applied
return disp_comp, x_source_comp, y_source_comp, residuals.flatten()
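# A minimal usage sketch for disp_eval (hypothetical inputs): `parameters` is
# assumed to be an lmfit.Parameters object holding A0..A8, and the Hillas
# quantities (width, length, cog_x, ..., leakage2) are assumed to be 1D numpy
# arrays of equal length:
#
#     disp, x_src, y_src, residuals = disp_eval(
#         parameters, width, length, cog_x, cog_y,
#         x_offset, y_offset, psi, skewness, size, leakage2, method=2)
#
# The flattened residuals have the shape lmfit.minimize expects from an
# objective function when fitting the A0..A8 coefficients.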
def leak_pixels(image):
cam = Camera()
geom = cam.geometry
neighbor_matrix = geom.neighbor_matrix
n_neighbors = np.sum(np.array(neighbor_matrix, dtype=int), axis=0)
# border pixels
camera_border_mask = n_neighbors < 6
# second pixel ring
n_neighbor_inner = np.sum(
np.array(np.multiply(neighbor_matrix,
camera_border_mask), dtype=int), axis=1)
camera_second_ring_mask = n_neighbor_inner >= 3
# two pixel rings mask
camera_two_rings_mask = camera_border_mask + camera_second_ring_mask
# Signal in two outermost pixel rings
signal_border = np.sum(image[:, camera_two_rings_mask], axis=1)
# Signal in full image
signal_full = np.sum(image, axis=1)
# LEAKAGE2 = the ratio between the light content in the two outermost
# camera pixel rings and the total light content of the recorded shower
# image
leakage2 = signal_border / signal_full
return leakage2, camera_two_rings_mask, signal_full, signal_border
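# A minimal usage sketch for leak_pixels (hypothetical shapes): `images` is
# assumed to be an (n_events, n_pixels) array of integrated pixel charges
# matching the camera geometry used above:
#
#     leakage2, ring_mask, total, border = leak_pixels(images)
#     contained = leakage2 < 0.15   # hypothetical containment cut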
def arrival_distribution(disp_comp, x_source_comp, y_source_comp, n_triples,
theta_squared_cut, bins, x_minmax, y_minmax
):
# For each event a set of possible arrival directions is calculated
# as an intersection with another two events, chosen from
# all events in the dataset. The arrival direction for given set of
# events is stored if sum of theta^2 for given triplet is less than
# theta_squared_cut.
x_intersect = []
y_intersect = []
n_bin_values_all = np.zeros((bins, bins))
theta_squared_sum_hist = []
for i in trange(len(disp_comp)):
events1 = np.random.randint(0, len(disp_comp), n_triples)
events2 = np.random.randint(0, len(disp_comp), n_triples)
for j, k in zip(events1, events2):
if j != i and k != j:
x_triple = [x_source_comp[i],
x_source_comp[j],
x_source_comp[k]]
y_triple = [y_source_comp[i],
y_source_comp[j],
y_source_comp[k]]
x_mean = np.mean(x_triple)
y_mean = np.mean(y_triple)
# Mean arrival direction of the triplet is taken into account
# only if its 'spread' is not too large. It means that
# the direction is well defined. As a measure of the spread,
# sum of theta^2 is taken. Theta means in this case the
# distance between triplet mean and computed position for each
# event in the triplet.
theta_squared_sum = sum(
(x_mean - x_triple) ** 2.0 +
(y_mean - y_triple) ** 2.0
)
if theta_squared_sum < theta_squared_cut:
x_intersect.append(x_mean)
y_intersect.append(y_mean)
theta_squared_sum_hist.append(theta_squared_sum)
# binning and normalization
n_bin = np.histogram2d(x_intersect,
y_intersect,
bins=bins,
range=[x_minmax, y_minmax]
)
if sum(sum(n_bin[0])) > 0:
n_bin_values = n_bin[0] / sum(sum(n_bin[0]))
# arrival distribution superposition for all events
n_bin_values_all = n_bin_values_all + n_bin_values
x_intersect = []
y_intersect = []
return n_bin_values_all, n_bin, theta_squared_sum_hist
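# A minimal usage sketch for arrival_distribution (hypothetical values; the
# field-of-view limits are in the same units as x_source_comp/y_source_comp):
#
#     n_bin_values_all, n_bin, theta2 = arrival_distribution(
#         disp, x_src, y_src, n_triples=500, theta_squared_cut=0.05,
#         bins=80, x_minmax=(-500, 500), y_minmax=(-500, 500))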
# RESOLUTION
def res_gaussian(xy, x0, y0, sigma, H, bkg): # 2D Gaussian model
x, y = xy
theta_squared = (x0 - x) ** 2.0 + (y0 - y) ** 2.0
G = H * np.exp(-theta_squared / (2 * sigma ** 2.0)) + bkg
return G
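# A minimal fitting sketch for res_gaussian using scipy.optimize.curve_fit
# (hypothetical data: `xc`, `yc` are flattened bin-centre coordinates and
# `counts` the corresponding 2D histogram values):
#
#     from scipy.optimize import curve_fit
#     p0 = [0.0, 0.0, 0.1, counts.max(), 0.0]   # x0, y0, sigma, H, bkg guesses
#     popt, pcov = curve_fit(res_gaussian, (xc, yc), counts, p0=p0)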
# R68 resolution (if the distribution is gaussian, R68 = sigma)
# Modified so that events above a certain cut are not taken into the calculation.
# In this version the cut is R99.
def r68(x, y, offset_x, offset_y):
center_x = offset_x # np.mean(x)
center_y = offset_y # np.mean(y)
x = x - center_x
y = y - center_y
N_full = len(x)
r99 = 0.05
r68_full = 0.05
N_in = 0
while N_in < 0.99 * N_full:
N_in = len(x[(x ** 2.0 + y ** 2.0 < r99 ** 2.0)])
r99 = r99 + 0.001
N_in = 0
while N_in < 0.682 * N_full:
N_in = len(x[(x ** 2.0 + y ** 2.0 < r68_full ** 2.0)])
r68_full = r68_full + 0.001
cut = r99
r68 = 0.05
N_in = 0
N_full = len(x[(x ** 2.0 + y ** 2.0 < cut ** 2.0)])
while N_in < 0.682 * N_full:
N_in = len(x[(x ** 2.0 + y ** 2.0 < r68 ** 2.0)])
r68 = r68 + 0.001
return r68_full, r68, r99, center_x, center_y
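# A minimal usage sketch for r68 (hypothetical inputs): x_src/y_src are
# reconstructed source positions, offset_x/offset_y the true source position:
#
#     r68_full, r68_cut, r99, cx, cy = r68(x_src, y_src, offset_x, offset_y)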
# R68 for modified version of DISP
def r68mod(x, y, n_bin_values, offset_x, offset_y):
center_x = offset_x # np.mean(x)
center_y = offset_y # np.mean(y)
x = x - center_x
y = y - center_y
N_full = sum(n_bin_values)
r99 = 0.05
r68_full = 0.05
N_in = 0
while N_in < 0.99 * N_full:
N_in = sum(n_bin_values[(x ** 2.0 + y ** 2.0 < r99 ** 2.0)])
r99 = r99 + 0.001
N_in = 0
while N_in < 0.682 * N_full:
N_in = sum(n_bin_values[(x ** 2.0 + y ** 2.0 < r68_full ** 2.0)])
r68_full = r68_full + 0.001
cut = r99
r68 = 0.05
N_in = 0
N_full = sum(n_bin_values[(x ** 2.0 + y ** 2.0 < cut ** 2.0)])
while N_in < 0.682 * N_full:
N_in = sum(n_bin_values[(x ** 2.0 + y ** 2.0 < r68 ** 2.0)])
r68 = r68 + 0.001
return r68_full, r68, r99, center_x, center_y
# PLOTTING
def plot_2d(data, vmin, vmax, xlabel, ylabel, cbarlabel):
rms2 = data[:, 2].reshape(
(len(np.unique(data[:, 0])),
len(np.unique(data[:, 1]))
))
x, y = np.meshgrid(np.unique(data[:, 1]), np.unique(data[:, 0]))
fig = plt.figure(figsize=(9, 8))
fig.add_subplot(111)
plt.imshow(rms2, vmin=vmin, vmax=vmax)
cbar = plt.colorbar()
cbar.set_label(cbarlabel)
plt.xticks(range(len(x[0])), x[0])
plt.yticks(range(len(y[:, 0])), y[:, 0])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_event(pix_x, pix_y, image):
plt.figure(figsize=(9, 9))
plt.scatter(pix_x[image == 0], pix_y[image == 0], color=[0.9, 0.9, 0.9])
pix_x_event = pix_x[image > 0]
pix_y_event = pix_y[image > 0]
image_event = image[image > 0]
plt.scatter(pix_x_event, pix_y_event, c=image_event)
plt.ylabel('FOV Y [mm]')
plt.xlabel('FOV X [mm]')
plt.tight_layout()
# Helper returning axis extents so that the plot ticks correspond to the x, y
# coordinates instead of the indexes of the plotted matrix
def extents(f):
delta = f[1] - f[0]
return [f[0] - delta / 2, f[-1] + delta / 2]
| gpl-3.0 |
teoliphant/numpy-refactor | doc/example.py | 11 | 3503 | """This is the docstring for the example.py module. Module names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
describe : type
Explanation
output : type
Explanation
tuple : type
Explanation
items : type
even more explaining
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
| bsd-3-clause |
ndingwall/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 11 | 5041 | """
============================
Gradient Boosting regression
============================
This example demonstrates Gradient Boosting to produce a predictive
model from an ensemble of weak predictive models. Gradient boosting can be used
for regression and classification problems. Here, we will train a model to
tackle a diabetes regression task. We will obtain the results from
:class:`~sklearn.ensemble.GradientBoostingRegressor` with least squares loss
and 500 regression trees of depth 4.
Note: For larger datasets (n_samples >= 10000), please refer to
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# Maria Telenczuk <https://github.com/maikia>
# Katrina Ni <https://github.com/nilichen>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, ensemble
from sklearn.inspection import permutation_importance
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# %%
# Load the data
# -------------------------------------
#
# First we need to load the data.
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# %%
# Data preprocessing
# -------------------------------------
#
# Next, we will split our dataset to use 90% for training and leave the rest
# for testing. We will also set the regression model parameters. You can play
# with these parameters to see how the results change.
#
# n_estimators : the number of boosting stages that will be performed.
# Later, we will plot deviance against boosting iterations.
#
# max_depth : limits the number of nodes in the tree.
# The best value depends on the interaction of the input variables.
#
# min_samples_split : the minimum number of samples required to split an
# internal node.
#
# learning_rate : how much the contribution of each tree will shrink.
#
# loss : loss function to optimize. The least squares function is used in this
# case; however, there are many other options (see
# :class:`~sklearn.ensemble.GradientBoostingRegressor` ).
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=13)
params = {'n_estimators': 500,
'max_depth': 4,
'min_samples_split': 5,
'learning_rate': 0.01,
'loss': 'ls'}
# %%
# Fit regression model
# -------------------------------------
#
# Now we will initialize the gradient boosting regressor and fit it with our
# training data. Let's also look at the mean squared error on the test data.
reg = ensemble.GradientBoostingRegressor(**params)
reg.fit(X_train, y_train)
mse = mean_squared_error(y_test, reg.predict(X_test))
print("The mean squared error (MSE) on test set: {:.4f}".format(mse))
# %%
# Plot training deviance
# -------------------------------------
#
# Finally, we will visualize the results. To do that we will first compute the
# test set deviance and then plot it against boosting iterations.
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(reg.staged_predict(X_test)):
test_score[i] = reg.loss_(y_test, y_pred)
fig = plt.figure(figsize=(6, 6))
plt.subplot(1, 1, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, reg.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
fig.tight_layout()
plt.show()
# %%
# Plot feature importance
# -------------------------------------
#
# Careful, impurity-based feature importances can be misleading for
# high cardinality features (many unique values). As an alternative,
# the permutation importances of ``reg`` can be computed on a
# held out test set. See :ref:`permutation_importance` for more details.
#
# For this example, the impurity-based and permutation methods identify the
# same 2 strongly predictive features but not in the same order. The third most
# predictive feature, "bp", is also the same for the 2 methods. The remaining
# features are less predictive and the error bars of the permutation plot
# show that they overlap with 0.
feature_importance = reg.feature_importances_
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig = plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(diabetes.feature_names)[sorted_idx])
plt.title('Feature Importance (MDI)')
result = permutation_importance(reg, X_test, y_test, n_repeats=10,
random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()
plt.subplot(1, 2, 2)
plt.boxplot(result.importances[sorted_idx].T,
vert=False, labels=np.array(diabetes.feature_names)[sorted_idx])
plt.title("Permutation Importance (test set)")
fig.tight_layout()
plt.show()
| bsd-3-clause |
CallaJun/hackprince | indico/mpl_toolkits/axes_grid1/colorbar.py | 8 | 27927 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.collections as collections
import matplotlib.contour as contour
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Bbox
make_axes_kw_doc = '''
============= ====================================================
Property Description
============= ====================================================
*orientation* vertical or horizontal
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
============= ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g., '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
It is known that some vector graphics viewers (svg and pdf) render white gaps
between segments of the colorbar. This is due to bugs in the viewers, not
matplotlib. As a workaround the colorbar can be rendered with overlapping
segments::
cbar = colorbar()
cbar.solids.set_edgecolor("face")
draw()
However, this has negative consequences in other circumstances, particularly
with semi-transparent images (alpha < 1) and colorbar extensions, and is
therefore not enabled by default (see issue #1188).
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
The transData of the *cax* is adjusted so that the limits in the
longest axis actually corresponds to the limits in colorbar range. On
the other hand, the shortest axis has a data limits of [1,2], whose
unconventional value is to prevent underflow when log scale is used.
''' % (make_axes_kw_doc, colormap_kw_doc)
docstring.interpd.update(colorbar_doc=colorbar_doc)
class CbarAxesLocator(object):
"""
    CbarAxesLocator is an axes_locator for colorbar axes. It adjusts the
    position of the axes to make room for the extended ends, i.e., the
    extended ends are located outside the axes area.
"""
def __init__(self, locator=None, extend="neither", orientation="vertical"):
"""
        *locator* : the bbox returned from the locator is used as an
           initial axes location. If None, axes.bbox is used.
*extend* : same as in ColorbarBase
*orientation* : same as in ColorbarBase
"""
self._locator = locator
self.extesion_fraction = 0.05
self.extend = extend
self.orientation = orientation
def get_original_position(self, axes, renderer):
"""
get the original position of the axes.
"""
if self._locator is None:
bbox = axes.get_position(original=True)
else:
bbox = self._locator(axes, renderer)
return bbox
def get_end_vertices(self):
"""
        Return a tuple of two sets of vertices for the colorbar extended ends.
        The first is for the minimum end, and the second is for
        the maximum end.
"""
        # Note that the two sets of vertices are concatenated to make the
        # vertices for the frame.
extesion_fraction = self.extesion_fraction
corx = extesion_fraction*2.
cory = 1./(1. - corx)
x1, y1, w, h = 0, 0, 1, 1
x2, y2 = x1 + w, y1 + h
dw, dh = w*extesion_fraction, h*extesion_fraction*cory
if self.extend in ["min", "both"]:
bottom = [(x1, y1),
(x1+w/2., y1-dh),
(x2, y1)]
else:
bottom = [(x1, y1),
(x2, y1)]
if self.extend in ["max", "both"]:
top = [(x2, y2),
(x1+w/2., y2+dh),
(x1, y2)]
else:
top = [(x2, y2),
(x1, y2)]
if self.orientation == "horizontal":
bottom = [(y,x) for (x,y) in bottom]
top = [(y,x) for (x,y) in top]
return bottom, top
def get_path_patch(self):
"""
get the path for axes patch
"""
end1, end2 = self.get_end_vertices()
verts = [] + end1 + end2 + end1[:1]
return Path(verts)
def get_path_ends(self):
"""
get the paths for extended ends
"""
end1, end2 = self.get_end_vertices()
return Path(end1), Path(end2)
def __call__(self, axes, renderer):
"""
Return the adjusted position of the axes
"""
bbox0 = self.get_original_position(axes, renderer)
bbox = bbox0
x1, y1, w, h = bbox.bounds
extesion_fraction = self.extesion_fraction
dw, dh = w*extesion_fraction, h*extesion_fraction
if self.extend in ["min", "both"]:
if self.orientation == "horizontal":
x1 = x1 + dw
else:
y1 = y1+dh
if self.extend in ["max", "both"]:
if self.orientation == "horizontal":
w = w-2*dw
else:
h = h-2*dh
return Bbox.from_bounds(x1, y1, w, h)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
# artists
self.solids = None
self.lines = None
self.dividers = None
self.extension_patch1 = None
self.extension_patch2 = None
if orientation == "vertical":
self.cbar_axis = self.ax.yaxis
else:
self.cbar_axis = self.ax.xaxis
if format is None:
if isinstance(self.norm, colors.LogNorm):
# change both axis for proper aspect
self.ax.xaxis.set_scale("log")
self.ax.yaxis.set_scale("log")
self.ax._update_transScale()
self.cbar_axis.set_minor_locator(ticker.NullLocator())
formatter = ticker.LogFormatter()
else:
formatter = None
elif cbook.is_string_like(format):
formatter = ticker.FormatStrFormatter(format)
else:
formatter = format # Assume it is a Formatter
if formatter is None:
formatter = self.cbar_axis.get_major_formatter()
else:
self.cbar_axis.set_major_formatter(formatter)
if cbook.iterable(ticks):
self.cbar_axis.set_ticks(ticks)
elif ticks is not None:
self.cbar_axis.set_major_locator(ticks)
else:
self._select_locator(formatter)
self._config_axes()
self.update_artists()
self.set_label_text('')
def _get_colorbar_limits(self):
"""
        Initial limits for the colorbar range. The returned min, max values
        will be used to create the colorbar solids, extended ends, etc.
"""
if self.boundaries is not None:
C = self.boundaries
if self.extend in ["min", "both"]:
C = C[1:]
if self.extend in ["max", "both"]:
C = C[:-1]
return min(C), max(C)
else:
return self.get_clim()
def _config_axes(self):
'''
Adjust the properties of the axes to be adequate for colorbar display.
'''
ax = self.ax
axes_locator = CbarAxesLocator(ax.get_axes_locator(),
extend=self.extend,
orientation=self.orientation)
ax.set_axes_locator(axes_locator)
# override the get_data_ratio for the aspect works.
def _f():
return 1.
ax.get_data_ratio = _f
ax.get_data_ratio_log = _f
ax.set_frame_on(True)
ax.set_navigate(False)
self.ax.set_autoscalex_on(False)
self.ax.set_autoscaley_on(False)
if self.orientation == 'horizontal':
ax.xaxis.set_label_position('bottom')
ax.set_yticks([])
else:
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
def update_artists(self):
"""
Update the colorbar associated artists, *filled* and
*ends*. Note that *lines* are not updated. This needs to be
called whenever clim of associated image changes.
"""
self._process_values()
self._add_ends()
X, Y = self._mesh()
if self.filled:
C = self._values[:,np.newaxis]
self._add_solids(X, Y, C)
ax = self.ax
vmin, vmax = self._get_colorbar_limits()
if self.orientation == 'horizontal':
ax.set_ylim(1, 2)
ax.set_xlim(vmin, vmax)
else:
ax.set_xlim(1, 2)
ax.set_ylim(vmin, vmax)
def _add_ends(self):
"""
Create patches from extended ends and add them to the axes.
"""
del self.extension_patch1
del self.extension_patch2
path1, path2 = self.ax.get_axes_locator().get_path_ends()
fc=mpl.rcParams['axes.facecolor']
ec=mpl.rcParams['axes.edgecolor']
linewidths=0.5*mpl.rcParams['axes.linewidth']
self.extension_patch1 = PathPatch(path1,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.extension_patch2 = PathPatch(path2,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.ax.add_artist(self.extension_patch1)
self.ax.add_artist(self.extension_patch2)
def _set_label_text(self):
"""
set label.
"""
self.cbar_axis.set_label_text(self._label, **self._labelkw)
def set_label_text(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label_text()
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [list(zip(X[i], Y[i])) for i in xrange(1, N-1)]
else:
return [list(zip(Y[i], X[i])) for i in xrange(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolormesh`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.extend in ["min", "both"]:
cc = self.to_rgba([C[0][0]])
self.extension_patch1.set_fc(cc[0])
X, Y, C = X[1:], Y[1:], C[1:]
if self.extend in ["max", "both"]:
cc = self.to_rgba([C[-1][0]])
self.extension_patch2.set_fc(cc[0])
X, Y, C = X[:-1], Y[:-1], C[:-1]
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha,
}
del self.solids
del self.dividers
col = self.ax.pcolormesh(*args, **kw)
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],),
)
self.ax.add_collection(self.dividers)
else:
self.dividers = None
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar. It deletes preexisting lines.
'''
del self.lines
N = len(levels)
x = np.array([1.0, 2.0])
X, Y = np.meshgrid(x,levels)
if self.orientation == 'vertical':
xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
else:
xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
col = collections.LineCollection(xy, linewidths=linewidths,
)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _select_locator(self, formatter):
'''
select a suitable locator
'''
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator(nbins=5)
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b) #, nbins=10)
self.cbar_axis.set_major_locator(locator)
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v = np.arange(self.cmap.N, dtype=np.int16)
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = np.array(self.norm.boundaries)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v = 0.5*(bi[:-1] + bi[1:])
self._boundaries = b
self._values = v
return
else:
b = self._uniform_y(self.cmap.N+1)
self._process_values(b)
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries.
'''
vmin, vmax = self._get_colorbar_limits()
if isinstance(self.norm, colors.LogNorm):
y = np.logspace(np.log10(vmin), np.log10(vmax), N)
else:
y = np.linspace(vmin, vmax, N)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([1.0, 2.0])
if self.spacing == 'uniform':
y = self._uniform_y(len(self._boundaries))
else:
y = self._boundaries
self._y = y
X, Y = np.meshgrid(x,y)
return X, Y
def set_alpha(self, alpha):
"""
set alpha value.
"""
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
"""
Update the colorbar artists to reflect the change of the
associated mappable.
"""
self.update_artists()
if isinstance(mappable, contour.ContourSet):
if not mappable.filled:
self.add_lines(mappable)
@docstring.Substitution(make_axes_kw_doc)
def make_axes(parent, **kw):
'''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
'''
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
def colorbar(mappable, cax=None, ax=None, **kw):
"""
Create a colorbar for a ScalarMappable instance.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if cax is None:
cax, kw = make_axes(ax, **kw)
cax.hold(True)
cb = Colorbar(cax, mappable, **kw)
def on_changed(m):
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
ax.figure.sca(ax)
return cb
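# A minimal usage sketch (hypothetical data; any ScalarMappable such as an
# image can serve as the mappable):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     im = ax.imshow(np.random.rand(10, 10))
#     cb = colorbar(im, ax=ax, extend='both')
#     cb.set_label_text('intensity')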
| lgpl-3.0 |
stevielu/viewfinder | backend/logs/generate_pdf_report.py | 13 | 11252 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Generate graphs from metrics data and output as pdf.
The PDF files are written to S3 and links send by email.
Simply run with:
python -m viewfinder.backend.logs.generate_pdf_report --devbox
Options:
- require_lock: default=True: grab a lock on 'generate_pdf_reports' before running
- analysis_intervals_days: default=14,90: generate one file for each interval
- upload_to_s3: default=True: upload PDF files to S3
- s3_dest: default='pdf_reports': directory in S3 to write to (inside bucket 'serverdata')
- local_working_dir: default='/tmp/': write pdf files to this local dir (they are not deleted)
- send_email: default=True: send an email report. If false, uses the LoggingEmailManager
- email: [email protected]: email recipient
- s3_url_expiration_days: default=14: time to live for the generated S3 urls.
"""
__author__ = '[email protected] (Marc Berhault)'
import json
import logging
import os
import re
import sys
import time
import traceback
from collections import Counter, defaultdict
from datetime import datetime
from tornado import gen, options
from viewfinder.backend.base import constants, main, retry, util
from viewfinder.backend.base.dotdict import DotDict
from viewfinder.backend.db import db_client, metric
from viewfinder.backend.db.job import Job
from viewfinder.backend.logs import logs_util
from viewfinder.backend.services.email_mgr import EmailManager, LoggingEmailManager, SendGridEmailManager
from viewfinder.backend.storage.object_store import ObjectStore
import matplotlib.pyplot as plt
from matplotlib import dates as mdates
from matplotlib.backends.backend_pdf import PdfPages
options.define('require_lock', default=True, help='Acquire job lock on "generate_pdf_reports" before running')
options.define('analysis_intervals_days', default=[14, 90], help='Intervals to analyze, in days')
options.define('upload_to_s3', default=True, help='Upload generated files to S3')
options.define('s3_dest', default='pdf_reports', help='S3 directory to write to (in serverdata bucket)')
options.define('local_working_dir', default='/tmp/', help='Local directory to write generated pdf files to')
options.define('send_email', default=True, help='Email links to reports')
options.define('email', default='[email protected]', help='Email recipient')
options.define('s3_url_expiration_days', default=14, help='Expiration time in days for S3 URLs')
# Retry policy for uploading files to S3.
kS3UploadRetryPolicy = retry.RetryPolicy(max_tries=5, timeout=300,
min_delay=1, max_delay=30,
check_exception=retry.RetryPolicy.AlwaysRetryOnException)
# Metrics to sum up into a new one.
kSummedMetrics = [ (re.compile(r'itunes\.downloads.*'), r'itunes.downloads'),
(re.compile(r'itunes\.updates.*'), r'itunes.updates'),
]
# Metric selection.
kFilteredMetrics = [ r'itunes\.updates',
r'itunes\.download',
r'db\.table\.count\.(Comment|Photo|User)$',
r'active_users\.requests_(all|share|post)\.(1d|7d|30d)',
]
# Metrics to draw on the same graph, associated title and legend.
kPlotAggregates = {
r'(active_users\.requests_all)\.(1d|7d|30d)': {
'title_rep': r'Active users (all requests)',
'legend_rep': r'\2',
},
r'(active_users\.requests_post)\.(1d|7d|30d)': {
'title_rep': r'Active users posting comments',
'legend_rep': r'\2',
},
r'(active_users\.requests_share)\.(1d|7d|30d)': {
'title_rep': r'Active users sharing photos',
'legend_rep': r'\2',
},
r'db\.table\.count\.(Comment|Photo|User)': {
'title_rep': r'Total \1s',
'legend_rep': None,
},
r'itunes\.(downloads|update)': {
'title_rep': r'Daily iTunes \1',
'legend_rep': None,
},
}
def SerializeMetrics(metrics):
def _SkipMetric(name):
for regex in kFilteredMetrics:
      res = re.match(regex, name)
if res is not None:
return False
return True
def _AggregateMetric(running_sum, metric_name):
"""Given a metric name, determine whether we sum it into a different metric name or not.
Returns whether the original metric needs to be processed.
"""
keep = True
for regex, replacement, in kSummedMetrics:
res = regex.sub(replacement, metric_name)
if res != metric_name:
keep = False
if not _SkipMetric(res):
running_sum[res] += v
return keep
data = defaultdict(list)
prev_metrics = {}
seen_vars = set()
for m in metrics:
running_sum = Counter()
timestamp = m.timestamp
payload = DotDict(json.loads(m.payload)).flatten()
for k, v in payload.iteritems():
keep_original = _AggregateMetric(running_sum, k)
if keep_original and not _SkipMetric(k):
running_sum[k] += v
for k, v in running_sum.iteritems():
data[k].append((timestamp, v))
return data
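# SerializeMetrics returns a dict mapping each selected metric name to a list
# of (timestamp, value) pairs, e.g. (hypothetical values):
#   {'itunes.downloads': [(1356998400, 42), (1357084800, 57), ...], ...}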
@gen.engine
def ProcessOneInterval(client, num_days, callback):
end_time = time.time()
start_time = time.time() - constants.SECONDS_PER_DAY * num_days
selected_interval = metric.LOGS_INTERVALS[-1]
group_key = metric.Metric.EncodeGroupKey(metric.LOGS_STATS_NAME, selected_interval)
logging.info('Query performance counters %s, range: %s - %s, resolution: %s'
% (group_key, time.ctime(start_time), time.ctime(end_time), selected_interval.name))
metrics = list()
start_key = None
while True:
new_metrics = yield gen.Task(metric.Metric.QueryTimespan, client, group_key,
start_time, end_time, excl_start_key=start_key)
if len(new_metrics) > 0:
metrics.extend(new_metrics)
start_key = metrics[-1].GetKey()
else:
break
data = SerializeMetrics(metrics)
def _DetermineTitle(metric_name):
for regex, props in kPlotAggregates.iteritems():
if not re.match(regex, metric_name):
continue
tres = re.sub(regex, props['title_rep'], metric_name)
legend_rep = props.get('legend_rep', None)
if not legend_rep:
return (tres, None)
else:
vres = re.sub(regex, legend_rep, metric_name)
return (tres, vres)
return (metric_name, metric_name)
def _SaveFig(legend_data):
logging.info('Drawing with legend_data=%r' % legend_data)
if legend_data:
# Shrink the figure vertically.
box = plt.gca().get_position()
plt.gca().set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
# Put a legend below current axis
plt.legend(legend_data, loc='upper center', bbox_to_anchor=(0.5, -0.20),
fancybox=True, shadow=True, ncol=5)
elif plt.legend():
plt.legend().set_visible(False)
# Write to pdf as a new page.
plt.savefig(pp, format='pdf')
# Clear all.
plt.clf()
plt.cla()
# PdfPages overwrites any existing files. Should unlink fail, we'll let the exception surface.
filename = '%dd-viewfinder-report.%s.pdf' % (num_days, util.NowUTCToISO8601())
pp = PdfPages(os.path.join(options.options.local_working_dir, filename))
last_entry = None
legend_strings = []
for k in sorted(data.keys()):
timestamps = []
y_axis = []
for a, b in data[k]:
dt = datetime.utcfromtimestamp(a)
dt = dt.replace(hour=0)
timestamps.append(dt)
y_axis.append(b)
x_axis = mdates.date2num(timestamps)
title, label = _DetermineTitle(k)
if last_entry is not None and last_entry != title:
# Different data set: draw figure, write to pdf and clear everything.
_SaveFig(legend_strings)
legend_strings = []
last_entry = title
if label:
legend_strings.append(label)
# autofmt_xdate sets the formatter and locator to AutoDate*. It seems smart enough.
# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
# plt.gca().xaxis.set_major_locator(mdates.DayLocator())
plt.title(title)
plt.grid(True)
# Plot data.
plt.plot_date(x_axis, y_axis, '-')
plt.gcf().autofmt_xdate()
_SaveFig(legend_strings)
pp.close()
callback(filename)
@gen.engine
def UploadFiles(object_store, filenames, callback):
for f in filenames:
local_file = os.path.join(options.options.local_working_dir, f)
contents = open(local_file, 'r').read()
remote_file = os.path.join(options.options.s3_dest, f)
# Assume 1MB/s transfer speed. If we don't have that good a connection, we really shouldn't be uploading big files.
    timeout = max(20.0, len(contents) / (1024 * 1024))
yield gen.Task(retry.CallWithRetryAsync, kS3UploadRetryPolicy,
object_store.Put, remote_file, contents, request_timeout=timeout)
logging.info('Uploaded %d bytes to S3 file %s' % (len(contents), remote_file))
callback()
@gen.engine
def SendEmail(title, text, callback):
args = {
'from': '[email protected]',
'fromname': 'Viewfinder reports',
'to': options.options.email,
'subject': title,
'text': text
}
yield gen.Task(EmailManager.Instance().SendEmail, description=title, **args)
callback()
@gen.engine
def SendReport(object_store, filename_dict, callback):
text = 'Viewfinder statistics report:\n'
text += '(URLs expire after %d days)\n\n' % options.options.s3_url_expiration_days
for days in sorted(filename_dict.keys()):
filename = filename_dict[days]
remote_file = os.path.join(options.options.s3_dest, filename)
url = object_store.GenerateUrl(remote_file,
expires_in=constants.SECONDS_PER_DAY * options.options.s3_url_expiration_days,
content_type='application/pdf')
text += 'Past %d days: %s\n\n' % (days, url)
title = 'Viewfinder statistics report: %s' % util.NowUTCToISO8601()
yield gen.Task(SendEmail, title, text)
callback()
@gen.engine
def RunOnce(client, callback):
object_store = ObjectStore.GetInstance(ObjectStore.SERVER_DATA)
filenames = {}
for num_days in options.options.analysis_intervals_days:
filename = yield gen.Task(ProcessOneInterval, client, num_days)
filenames[num_days] = filename
yield gen.Task(UploadFiles, object_store, filenames.values())
yield gen.Task(SendReport, object_store, filenames)
callback()
@gen.engine
def Start(callback):
client = db_client.DBClient.Instance()
job = Job(client, 'generate_pdf_reports')
if options.options.require_lock:
got_lock = yield gen.Task(job.AcquireLock)
if got_lock == False:
logging.warning('Failed to acquire job lock: exiting.')
callback()
return
if options.options.send_email:
# When running on devbox, this prompts for the passphrase. Skip if not sending email.
EmailManager.SetInstance(SendGridEmailManager())
else:
EmailManager.SetInstance(LoggingEmailManager())
# We never call job.Start() since we don't want a summary status written to the DB, just the lock.
try:
yield gen.Task(RunOnce, client)
except:
logging.error(traceback.format_exc())
finally:
yield gen.Task(job.ReleaseLock)
callback()
if __name__ == '__main__':
sys.exit(main.InitAndRun(Start))
| apache-2.0 |
glouppe/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each Feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
Eric89GXL/scipy | scipy/signal/wavelets.py | 67 | 10523 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2p filter coefficients of the low-pass filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
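# Illustrative sketch: evaluate the Daubechies-3 scaling and wavelet functions
# on a dyadic grid.  By construction the scaling function integrates to
# (approximately) one over its support.
#
# >>> x, phi, psi = cascade(daub(3), J=7)
# >>> dx = x[1] - x[0]
# >>> round(float(np.sum(phi) * dx), 3)
# 1.0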
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
    Note that the energy of the returned wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
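# Illustrative check of the frequency note above (parameter values are
# arbitrary examples): with M samples at rate r the spectral peak of the
# wavelet sits near 2*s*w*r / M.
#
# >>> M, w, s, r = 1024, 5.0, 1.0, 1024.0
# >>> wav = morlet(int(M), w=w, s=s)
# >>> freqs = np.fft.fftfreq(int(M), d=1.0 / r)
# >>> peak = freqs[np.argmax(np.abs(np.fft.fft(wav)))]
# >>> bool(abs(peak - 2 * s * w * r / M) <= 1.0)
# True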
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| bsd-3-clause |
dstftw/youtube-dl | youtube_dl/extractor/wsj.py | 30 | 4694 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
https?://(?:www\.)?(?:wsj|barrons)\.com/video/(?:[^/]+/)+|
wsj:
)
(?P<id>[a-fA-F0-9-]{36})
'''
IE_DESC = 'Wall Street Journal'
_TESTS = [{
'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'md5': 'e230a5bb249075e40793b655a54a02e4',
'info_dict': {
'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'ext': 'mp4',
'upload_date': '20150202',
'uploader_id': 'jdesai',
'creator': 'jdesai',
'categories': list, # a long list
'duration': 90,
'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
},
}, {
'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
'only_matching': True,
}, {
'url': 'http://www.barrons.com/video/capitalism-deserves-more-respect-from-millennials/F301217E-6F46-43AE-B8D2-B7180D642EE9.html',
'only_matching': True,
}, {
'url': 'https://www.wsj.com/video/series/a-brief-history-of/the-modern-cell-carrier-how-we-got-here/980E2187-401D-48A1-B82B-1486CEE06CB9',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://video-api.wsj.com/api-video/find_all_videos.asp', video_id,
query={
'type': 'guid',
'count': 1,
'query': video_id,
'fields': ','.join((
'type', 'hls', 'videoMP4List', 'thumbnailList', 'author',
'description', 'name', 'duration', 'videoURL', 'titletag',
'formattedCreationDate', 'keywords', 'editor')),
})['items'][0]
title = info.get('name', info.get('titletag'))
formats = []
f4m_url = info.get('videoURL')
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False))
m3u8_url = info.get('hls')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
info['hls'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
for v in info.get('videoMP4List', []):
mp4_url = v.get('url')
if not mp4_url:
continue
tbr = int_or_none(v.get('bitrate'))
formats.append({
'url': mp4_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
'width': int_or_none(v.get('width')),
'height': int_or_none(v.get('height')),
'fps': float_or_none(v.get('fps')),
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
# Thumbnails are conveniently in the correct format already
'thumbnails': info.get('thumbnailList'),
'creator': info.get('author'),
'uploader_id': info.get('editor'),
'duration': int_or_none(info.get('duration')),
'upload_date': unified_strdate(info.get(
'formattedCreationDate'), day_first=False),
'title': title,
'categories': info.get('keywords'),
}
class WSJArticleIE(InfoExtractor):
_VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
'info_dict': {
'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
'ext': 'mp4',
'upload_date': '20170221',
'uploader_id': 'ralcaraz',
'title': 'Bao Bao the Panda Leaves for China',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
video_id = self._search_regex(
r'data-src=["\']([a-fA-F0-9-]{36})', webpage, 'video id')
return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id)
| unlicense |
flavioamieiro/AlertaDengue | AlertaDengue/dados/dbdata.py | 1 | 27642 | """
This module contains functions to interact with the main database of the
AlertaDengue project.
"""
from sqlalchemy import create_engine
from django.core.cache import cache
from collections import defaultdict
from datetime import datetime, timedelta
# local
from .episem import episem
from . import settings
import pandas as pd
import numpy as np
# rio de janeiro city geocode
MRJ_GEOCODE = 3304557
CID10 = {
'dengue': 'A90',
'chikungunya': 'A920',
'zika': 'A928'
}
STATE_NAME = {
'CE': 'Ceará',
'ES': 'Espírito Santo',
'MG': 'Minas Gerais',
'PR': 'Paraná',
'RJ': 'Rio de Janeiro'
}
# map full state name -> state initial (UF)
STATE_INITIAL = dict(zip(STATE_NAME.values(), STATE_NAME.keys()))
db_engine = create_engine("postgresql://{}:{}@{}/{}".format(
settings.PSQL_USER,
settings.PSQL_PASSWORD,
settings.PSQL_HOST,
settings.PSQL_DB
))
def _nan_to_num_int_list(v):
"""
:param v: numpy.array
:return: list
"""
try:
return np.nan_to_num(v.fillna(0)).astype(int).tolist()
except:
return np.nan_to_num(v).astype(int).tolist()
def _episem(dt):
return episem(dt, sep='')
def get_disease_suffix(disease: str):
"""
:param disease:
:return:
"""
return (
'' if disease == 'dengue' else
'_chik' if disease == 'chikungunya' else
'_zika' if disease == 'zika' else
''
)
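# Illustrative mapping (doctest-style):
#
# >>> get_disease_suffix('dengue')
# ''
# >>> get_disease_suffix('chikungunya')
# '_chik'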
def get_cities() -> dict:
"""
Get a list of cities from available states with code and name pairs
:return:
"""
with db_engine.connect() as conn:
state_names = [
"'%s'" % state_name for state_name in STATE_NAME.values()
]
sql = '''
SELECT geocodigo, nome
FROM "Dengue_global"."Municipio"
WHERE uf IN(%s)
''' % ','.join(state_names)
return dict(conn.execute(sql).fetchall())
def get_city_name_by_id(geocode: int):
"""
:param geocode:
:return:
"""
with db_engine.connect() as conn:
res = conn.execute('''
SELECT nome
FROM "Dengue_global"."Municipio"
WHERE geocodigo=%s;
''' % geocode)
return res.fetchone()[0]
def get_all_active_cities():
"""
Fetch from the database a list on names of active cities
:return: list of tuples (geocode,name)
"""
res = cache.get('get_all_active_cities')
if res is None:
with db_engine.connect() as conn:
res = conn.execute(
' SELECT DISTINCT municipio_geocodigo, municipio_nome'
' FROM "Municipio"."Historico_alerta";')
res = res.fetchall()
cache.set(
'get_all_active_cities', res, settings.QUERY_CACHE_TIMEOUT
)
return res
def get_alerta_mrj():
"""
Fetch the alert table for the city of Rio de janeiro
:return: pandas dataframe
"""
sql = 'select * from "Municipio".alerta_mrj;'
with db_engine.connect() as conn:
return pd.read_sql_query(sql, conn, index_col='id')
def get_alerta_mrj_chik():
"""
Fetch the alert table for the city of Rio de janeiro
:return: pandas dataframe
"""
sql = 'select * from "Municipio".alerta_mrj_chik;'
with db_engine.connect() as conexao:
return pd.read_sql_query(sql, conexao, index_col='id')
def get_alerta_mrj_zika():
"""
Fetch the alert table for the city of Rio de janeiro
:return: pandas dataframe
"""
sql = 'select * from "Municipio".alerta_mrj_zika;'
with db_engine.connect() as conexao:
return pd.read_sql_query(sql, conexao, index_col='id')
def get_last_alert(geo_id, disease):
"""
:param geo_id:
:param disease:
:return:
"""
table_name = 'Historico_alerta' + get_disease_suffix(disease)
sql = '''
SELECT nivel
FROM "Municipio"."%s"
WHERE municipio_geocodigo=%s
ORDER BY "data_iniSE" DESC
LIMIT 1
''' % (table_name, geo_id)
with db_engine.connect() as conn:
return pd.read_sql_query(sql, conn)
def get_city(query):
"""
Fetch city geocode, name and state from the database,
matching the substring query
:param query: substring of the city
:return: list of dictionaries
"""
with db_engine.connect() as conexao:
sql = (
' SELECT distinct municipio_geocodigo, nome, uf' +
' FROM "Municipio"."Historico_alerta" AS alert' +
' INNER JOIN "Dengue_global"."Municipio" AS city' +
' ON alert.municipio_geocodigo=city.geocodigo' +
' WHERE nome ilike(%s);'
)
result = conexao.execute(sql, ('%'+query+'%',))
return result.fetchall()
def get_series_by_UF(disease='dengue'):
"""
    Get the incidence series from the database aggregated (sum) by state
:param disease: dengue|chikungunya|zika
:return: Dataframe with the series in long format
"""
cache_id = 'get_series_by_UF-{}'.format(disease)
series = cache.get(cache_id)
_disease = get_disease_suffix(disease)
if series is None:
with db_engine.connect() as conn:
series = pd.read_sql(
'select * from uf_total{}_view;'.format(_disease),
conn, parse_dates=True
)
cache.set(cache_id, series, settings.QUERY_CACHE_TIMEOUT)
return series
def get_n_chik_alerts():
"""
:return: int
"""
sql = '''
SELECT COUNT(*) AS n_alerts
FROM "Municipio"."Historico_alerta_chik"
'''
return pd.read_sql_query(sql, db_engine).loc[0, 'n_alerts']
def get_n_zika_alerts():
"""
:return: int
"""
sql = '''
SELECT COUNT(*) AS n_alerts
FROM "Municipio"."Historico_alerta_zika"
'''
return pd.read_sql_query(sql, db_engine).loc[0, 'n_alerts']
def load_series(cidade, disease: str='dengue', epiweek: int=0):
"""
Monta as séries do alerta para visualização no site
:param cidade: geocodigo da cidade desejada
:param disease: dengue|chikungunya|zika
:param epiweek:
:return: dictionary
"""
cache_key = 'load_series-{}-{}'.format(cidade, disease)
result = cache.get(cache_key)
if result is None:
ap = str(cidade)
if epiweek is not None:
dados_alerta = Forecast.load_cases(
geocode=cidade, disease=disease, epiweek=epiweek
)
else:
dados_alerta = load_cases_without_forecast(
geocode=cidade, disease=disease
)
if len(dados_alerta) == 0:
return {ap: None}
# tweets = pd.read_sql_query('select * from "Municipio"."Tweet"
# where "Municipio_geocodigo"={}'.format(cidade), parse_dates=True)
series = defaultdict(lambda: defaultdict(lambda: []))
series[ap]['dia'] = dados_alerta.data_iniSE.tolist()
# series[ap]['tweets'] = [float(i) if not np.isnan(i) else
# None for i in tweets.numero]
# series[ap]['tmin'] = [float(i) if not np.isnan(i) else
# None for i in G.get_group(ap).tmin]
series[ap]['casos_est_min'] = _nan_to_num_int_list(
dados_alerta.casos_est_min
)
series[ap]['casos_est'] = _nan_to_num_int_list(
dados_alerta.casos_est
)
series[ap]['casos_est_max'] = _nan_to_num_int_list(
dados_alerta.casos_est_max
)
series[ap]['casos'] = _nan_to_num_int_list(dados_alerta.casos)
# (1,4)->(0,3)
series[ap]['alerta'] = (dados_alerta.nivel.fillna(1).astype(int)-1).tolist()
series[ap]['SE'] = (dados_alerta.SE.astype(int)).tolist()
series[ap]['prt1'] = dados_alerta.p_rt1.astype(float).tolist()
k_forecast = [
k for k in dados_alerta.keys()
if k.startswith('forecast_')
]
if k_forecast:
for k in k_forecast:
series[ap][k] = (
dados_alerta[k].astype(float).tolist()
)
series[ap] = dict(series[ap])
result = dict(series)
cache.set(cache_key, result, settings.QUERY_CACHE_TIMEOUT)
return result
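# Illustrative usage sketch (assumes a populated database and a configured
# Django cache; 3304557 is the Rio de Janeiro geocode, MRJ_GEOCODE above):
#
#   series = load_series(3304557, disease='dengue')
#   series['3304557']['casos_est']  # estimated cases per epidemiological week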
def load_cases_without_forecast(geocode: int, disease):
"""
:param geocode:
:param disease:
:return:
"""
with db_engine.connect() as conn:
if geocode == MRJ_GEOCODE: # RJ city
table_name = 'alerta' + get_disease_suffix(disease)
data_alert = pd.read_sql_query('''
SELECT
data AS "data_iniSE",
SUM(casos_estmin) AS casos_est_min,
SUM(casos_est) as casos_est,
SUM(casos_estmax) AS casos_est_max,
SUM(casos) AS casos,
MAX(nivel) AS nivel,
se AS "SE",
SUM(prt1) AS p_rt1
FROM "Municipio".{}
GROUP BY "data_iniSE", "SE"
'''.format(table_name),
conn, parse_dates=True
)
else:
table_name = 'Historico_alerta' + get_disease_suffix(disease)
data_alert = pd.read_sql_query('''
SELECT * FROM "Municipio"."{}"
WHERE municipio_geocodigo={} ORDER BY "data_iniSE" ASC
'''.format(table_name, geocode),
conn, parse_dates=True
)
return data_alert
def load_serie_cities(geocodigos, doenca='dengue'):
"""
    Build the alert series for visualization on the website
    :param geocodigos: list of geocodes of the desired cities
:param doenca: dengue|chik|zika
:return: dictionary
"""
result = {}
_geocodigos = {}
aps = []
cidades = []
for cidade in geocodigos:
cache_key = 'load_series-{}-{}'.format(cidade, doenca)
_result = cache.get(cache_key)
ap = str(cidade)
aps.append(ap)
if _result is not None:
result.update(_result)
else:
cidades.append(add_dv(int(ap[:-1])))
_geocodigos[cidades[-1]] = cidade
if not cidades:
return result
sql = ('''
SELECT
id, municipio_geocodigo, casos_est, casos,
"data_iniSE", casos_est_min, casos_est_max,
nivel, "SE", p_rt1
FROM "Municipio"."Historico_alerta"
WHERE municipio_geocodigo IN (''' + ('{},'*len(cidades))[:-1] + ''')
ORDER BY municipio_geocodigo ASC, "data_iniSE" ASC
''').format(*cidades)
with db_engine.connect() as conn:
dados_alerta = pd.read_sql_query(
sql, conn, 'id', parse_dates=True
)
if len(dados_alerta) == 0:
raise NameError(
"Não foi possível obter os dados do Banco"
)
series = defaultdict(lambda: defaultdict(lambda: []))
for k, v in _geocodigos.items():
ap = str(v)
mask = dados_alerta.municipio_geocodigo == k
series[ap]['dia'] = dados_alerta[mask].data_iniSE.tolist()
series[ap]['casos_est_min'] = np.nan_to_num(
dados_alerta[mask].casos_est_min).astype(int).tolist()
series[ap]['casos_est'] = np.nan_to_num(
dados_alerta[mask].casos_est
).astype(int).tolist()
series[ap]['casos_est_max'] = np.nan_to_num(
dados_alerta[mask].casos_est_max).astype(int).tolist()
series[ap]['casos'] = np.nan_to_num(
dados_alerta[mask].casos
).astype(int).tolist()
series[ap]['alerta'] = (
dados_alerta[mask].nivel.astype(int) - 1
).tolist() # (1,4)->(0,3)
series[ap]['SE'] = (dados_alerta[mask].SE.astype(int)).tolist()
series[ap]['prt1'] = dados_alerta[mask].p_rt1.astype(float).tolist()
series[ap] = dict(series[ap])
cache_key = 'load_series-{}-{}'.format(ap, doenca)
cache.set(cache_key, {ap: series[ap]}, settings.QUERY_CACHE_TIMEOUT)
return series
def get_city_alert(cidade, disease='dengue'):
"""
    Return several alert indicators at the city level.
    :param cidade: city geocode
    :param disease: dengue|chikungunya|zika
:return: tuple -> alert, SE, case_series, last_year,
obs_case_series, min_max_est, dia, prt1
"""
series = load_series(cidade, disease)
series_city = series[str(cidade)]
if series_city is None:
return (
[], None, [0], 0,
[0], [0, 0], datetime.now(), 0
)
alert = series_city['alerta'][-1]
SE = series_city['SE'][-1]
case_series = series_city['casos_est']
last_year = series_city['casos'][-52]
obs_case_series = series_city['casos']
min_max_est = (
series_city['casos_est_min'][-1],
series_city['casos_est_max'][-1])
dia = series_city['dia'][-1]
prt1 = np.mean(series_city['prt1'][-3:])
return (
alert, SE, case_series, last_year,
obs_case_series, min_max_est, dia, prt1
)
def calculate_digit(dig):
"""
    Compute the check digit of a municipality geocode
    :param dig: geocode with 6 digits
    :return: check digit
"""
peso = [1, 2, 1, 2, 1, 2, 0]
soma = 0
dig = str(dig)
for i in range(6):
valor = int(dig[i]) * peso[i]
soma += sum([int(d) for d in str(valor)]) if valor > 9 else valor
dv = 0 if soma % 10 == 0 else (10 - (soma % 10))
return dv
def add_dv(geocodigo):
"""
    Return the municipality geocode with the check digit appended,
    if necessary.
    :param geocodigo: geocode with 6 or 7 digits
"""
if len(str(geocodigo)) == 7:
return geocodigo
else:
return int(str(geocodigo) + str(calculate_digit(geocodigo)))
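# Illustrative check-digit example (doctest-style): the 6-digit prefix of the
# Rio de Janeiro geocode gains the verification digit 7, giving MRJ_GEOCODE.
#
# >>> calculate_digit(330455)
# 7
# >>> add_dv(330455)
# 3304557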
class NotificationResume:
@staticmethod
def count_cities_by_uf(uf, disease='dengue'):
"""
        Return the number of participating cities in a state
        :param uf: state (UF) to query
        :param disease: dengue|chikungunya|zika
        :return: int
"""
table_name = 'Historico_alerta' + get_disease_suffix(disease)
sql = '''
SELECT COALESCE(COUNT(municipio_geocodigo), 0) AS count
FROM (
SELECT DISTINCT municipio_geocodigo
FROM "Municipio"."%s") AS alerta
INNER JOIN "Dengue_global"."Municipio" AS municipio
ON alerta.municipio_geocodigo = municipio.geocodigo
WHERE uf='%s'
''' % (table_name, uf)
with db_engine.connect() as conn:
return pd.read_sql(sql, conn).astype(int).iloc[0]['count']
@staticmethod
def count_cases_by_uf(uf, se):
"""
        Return the observed and estimated case counts for a state
        in a given epidemiological week
        :param uf: state (UF) to query
        :param se: year and epidemiological week number, e.g. 201503
:return: dataframe
"""
sql = '''
SELECT
COALESCE(SUM(casos), 0) AS casos,
COALESCE(SUM(casos_est), 0) AS casos_est
FROM
"Municipio".historico_casos AS dengue
INNER JOIN "Dengue_global"."Municipio" AS city
ON dengue.municipio_geocodigo = city.geocodigo
WHERE uf='%s' AND "SE" = %s
''' % (uf, se)
with db_engine.connect() as conn:
return pd.read_sql(sql, conn).astype(int)
@staticmethod
def count_cases_week_variation_by_uf(uf, se1, se2):
"""
        Return the week-over-week variation in observed and estimated case
        counts for a state; for the current and previous weeks this would use
        AND alerta."SE"=(select epi_week(NOW()::DATE))
        AND alerta_passado."SE"=(select epi_week(NOW()::DATE-7))
        :param uf: state (UF) to query
        :param se1: year and epidemiological week number, e.g. 201503
        :param se2: year and epidemiological week number, e.g. 201503
:return: dataframe
"""
sql = '''
SELECT
COALESCE(SUM(alerta.casos)-SUM(alerta_passado.casos), 0) AS casos,
COALESCE(SUM(alerta.casos_est)-SUM(alerta_passado.casos_est), 0)
AS casos_est
FROM "Municipio".historico_casos AS alerta
INNER JOIN "Municipio".historico_casos AS alerta_passado
ON (
alerta.municipio_geocodigo = alerta_passado.municipio_geocodigo
AND alerta."SE"=%s
AND alerta_passado."SE"=%s)
INNER JOIN "Dengue_global"."Municipio" AS municipio
ON alerta.municipio_geocodigo = municipio.geocodigo
WHERE uf ='%s'
''' % (se2, se1, uf)
with db_engine.connect() as conn:
return pd.read_sql(sql, conn).astype(int)
@staticmethod
def tail_estimated_cases(geo_ids, n=12):
"""
:param geo_ids: list of city geo ids
:param n: the last n estimated cases
:return: dict
"""
if len(geo_ids) < 1:
raise Exception('GEO id list should have at least 1 code.')
sql_template = '''(
SELECT
municipio_geocodigo, "data_iniSE", casos_est
FROM
"Municipio".historico_casos
WHERE
municipio_geocodigo={}
ORDER BY
"data_iniSE" DESC
LIMIT ''' + str(n) + ')'
sql = ' UNION '.join([
sql_template.format(gid) for gid in geo_ids
])
if len(geo_ids) > 1:
sql += ' ORDER BY municipio_geocodigo, "data_iniSE"'
with db_engine.connect() as conn:
df_case_series = pd.read_sql(sql, conn)
return {
k: v.casos_est.values.tolist()
for k, v in df_case_series.groupby(by='municipio_geocodigo')
}
@staticmethod
def get_cities_alert_by_state(state_name, disease='dengue'):
"""
        Return the most recent alert level for each city in a state.
        :param state_name: State name
        :param disease: dengue|chikungunya|zika
        :return: pandas.DataFrame
"""
_disease = get_disease_suffix(disease)
sql = '''
SELECT
hist_alert.id,
hist_alert.municipio_geocodigo,
municipio.nome,
hist_alert."data_iniSE",
(hist_alert.nivel-1) AS level_alert
FROM
"Municipio"."Historico_alerta{0}" AS hist_alert
INNER JOIN (
SELECT geocodigo, MAX("data_iniSE") AS "data_iniSE"
FROM
"Municipio"."Historico_alerta{0}" AS alerta
INNER JOIN "Dengue_global"."Municipio" AS municipio
ON alerta.municipio_geocodigo = municipio.geocodigo
WHERE uf='{1}'
GROUP BY geocodigo
) AS recent_alert ON (
recent_alert.geocodigo=hist_alert.municipio_geocodigo
AND recent_alert."data_iniSE"=hist_alert."data_iniSE"
) INNER JOIN "Dengue_global"."Municipio" AS municipio ON (
hist_alert.municipio_geocodigo = municipio.geocodigo
)
'''.format(_disease, state_name)
with db_engine.connect() as conn:
return pd.read_sql_query(sql, conn, 'id', parse_dates=True)
@staticmethod
def get_4_weeks_variation(state_name, current_date):
# for variation_4_weeks
se_current_year_1 = _episem(current_date)
se_current_year_2 = _episem(current_date - timedelta(days=0, weeks=3))
se_last_year_1 = _episem(current_date - timedelta(days=0, weeks=52))
se_last_year_2 = _episem(current_date - timedelta(days=0, weeks=55))
sql = '''
SELECT
casos_corrente-casos_passado AS casos,
casos_est_corrente-casos_est_passado AS casos_est
FROM
(SELECT
COALESCE(SUM(alerta.casos), 0) AS casos_corrente,
COALESCE(SUM(alerta.casos_est), 0) AS casos_est_corrente
FROM "Municipio".historico_casos AS alerta
INNER JOIN "Dengue_global"."Municipio" AS municipio
ON alerta.municipio_geocodigo = municipio.geocodigo
AND uf ='%(state_name)s'
WHERE
alerta."SE" <= %(se_current_year_1)s
AND alerta."SE" >= %(se_current_year_2)s
) AS tb_casos
INNER JOIN (
SELECT
COALESCE(SUM(alerta.casos), 0) AS casos_passado,
COALESCE(SUM(alerta.casos_est), 0) AS casos_est_passado
FROM "Municipio".historico_casos AS alerta
INNER JOIN "Dengue_global"."Municipio" AS municipio
ON alerta.municipio_geocodigo = municipio.geocodigo
AND uf ='%(state_name)s'
WHERE
alerta."SE" <= %(se_last_year_1)s
AND alerta."SE" >= %(se_last_year_2)s
) AS tb_casos_passado
ON (1=1)
''' % {
'state_name': state_name,
'se_current_year_1': se_current_year_1,
'se_current_year_2': se_current_year_2,
'se_last_year_1': se_last_year_1,
'se_last_year_2': se_last_year_2,
}
with db_engine.connect() as conn:
return pd.read_sql_query(sql, conn, parse_dates=True)
class Forecast:
@staticmethod
def get_min_max_date(geocode: int, cid10: str) -> (str, str):
"""
:param geocode:
:param cid10:
:return: tuple with min and max date (str) from the forecasts
"""
sql = '''
SELECT
TO_CHAR(MIN(init_date_epiweek), 'YYYY-MM-DD') AS epiweek_min,
TO_CHAR(MAX(init_date_epiweek), 'YYYY-MM-DD') AS epiweek_max
FROM
forecast.forecast_cases AS f
INNER JOIN forecast.forecast_city AS fc
ON (f.geocode = fc.geocode AND fc.active=TRUE)
INNER JOIN forecast.forecast_model AS fm
ON (fc.forecast_model_id = fm.id AND fm.active = TRUE)
WHERE f.geocode={} AND cid10='{}'
'''.format(geocode, cid10)
values = pd.read_sql_query(sql, db_engine).values.flat
return values[0], values[1]
@staticmethod
def load_cases(geocode: int, disease: str, epiweek: int):
"""
:param geocode:
:param disease:
:param epiweek:
:return:
"""
# sql settings
cid10 = CID10[disease]
sql = '''
SELECT DISTINCT ON (forecast_cases.forecast_model_id)
forecast_cases.forecast_model_id,
forecast_model.name AS forecast_model_name,
forecast_cases.published_date
FROM
forecast.forecast_cases
INNER JOIN forecast.forecast_model
ON (
forecast_cases.forecast_model_id =
forecast_model.id
)
WHERE
cid10 = '%s'
AND geocode = %s
AND epiweek = %s
ORDER BY forecast_model_id, published_date DESC
''' % (cid10, geocode, epiweek)
with db_engine.connect() as conn:
df_forecast_model = pd.read_sql(sql, con=conn)
if geocode == MRJ_GEOCODE: # RJ city
table_name = 'alerta_mrj' + get_disease_suffix(disease)
sql_alert = '''
SELECT
data AS "data_iniSE",
SUM(casos_estmin) AS casos_est_min,
SUM(casos_est) as casos_est,
SUM(casos_estmax) AS casos_est_max,
SUM(casos) AS casos,
MAX(nivel) AS nivel,
se AS "SE",
SUM(prt1) AS p_rt1
FROM "Municipio".{}
GROUP BY "data_iniSE", "SE"
'''.format(table_name)
else:
table_name = 'Historico_alerta' + get_disease_suffix(disease)
sql_alert = '''
SELECT * FROM "Municipio"."{}"
WHERE municipio_geocodigo={} ORDER BY "data_iniSE" ASC
'''.format(table_name, geocode)
sql = """
SELECT
(CASE
WHEN tb_cases."data_iniSE" IS NOT NULL
THEN tb_cases."data_iniSE"
%(forecast_date_ini_epiweek)s
ELSE NULL
END
) AS "data_iniSE",
tb_cases.casos_est_min,
tb_cases.casos_est,
tb_cases.casos_est_max,
tb_cases.casos,
tb_cases.nivel,
(CASE
WHEN tb_cases."SE" IS NOT NULL THEN tb_cases."SE"
%(forecast_epiweek)s
ELSE NULL
END
) AS "SE",
tb_cases.p_rt1
%(forecast_models_cases)s
FROM
(%(sql_alert)s) AS tb_cases %(forecast_models_joins)s
ORDER BY "data_iniSE" ASC
"""
sql_forecast_by_model = '''
FULL OUTER JOIN (
SELECT
epiweek,
init_date_epiweek,
cases AS forecast_%(model_name)s_cases
FROM
forecast.forecast_cases
INNER JOIN forecast.forecast_model
ON (
forecast_cases.forecast_model_id = forecast_model.id
AND forecast_model.active=TRUE
)
INNER JOIN forecast.forecast_city
ON (
forecast_city.geocode = forecast_cases.geocode
AND forecast_cases.forecast_model_id =
forecast_city.forecast_model_id
AND forecast_city.active=TRUE
)
WHERE
cid10='%(cid10)s'
AND forecast_cases.geocode=%(geocode)s
AND published_date='%(published_date)s'
AND forecast_cases.forecast_model_id=%(model_id)s
) AS forecast%(model_id)s ON (
tb_cases."SE" = forecast%(model_id)s.epiweek
)
'''
forecast_date_ini_epiweek = ''
forecast_models_cases = ''
forecast_models_joins = ''
forecast_epiweek = ''
forecast_config = {
'geocode': geocode,
'cid10': cid10,
'published_date': None,
'model_name': None,
'model_id': None
}
for i, row in df_forecast_model.iterrows():
forecast_config.update({
'published_date': row.published_date,
'model_name': row.forecast_model_name,
'model_id': row.forecast_model_id
})
# forecast models join sql
forecast_models_joins += sql_forecast_by_model % forecast_config
# forecast date ini selection
forecast_date_ini_epiweek += '''
WHEN forecast%(model_id)s.init_date_epiweek IS NOT NULL
THEN forecast%(model_id)s.init_date_epiweek
''' % forecast_config
# forecast epiweek selection
forecast_epiweek += '''
WHEN forecast%(model_id)s.epiweek IS NOT NULL
THEN forecast%(model_id)s.epiweek
''' % forecast_config
# forecast models cases selection
forecast_models_cases += (
',forecast_%(model_name)s_cases' % forecast_config
)
if forecast_models_cases == '':
forecast_models_cases = ',1'
sql = sql % {
'forecast_models_joins': forecast_models_joins,
'forecast_models_cases': forecast_models_cases,
'forecast_date_ini_epiweek': forecast_date_ini_epiweek,
'forecast_epiweek': forecast_epiweek,
'sql_alert': sql_alert
}
with db_engine.connect() as conn:
return pd.read_sql(sql, con=conn, parse_dates=True)
| gpl-3.0 |
VictorPelaez/coral-reef-optimization-algorithm | examples/example_advanced.py | 1 | 4626 | #!/usr/bin/env python
# coding=utf-8
###############################################################################
import context
from cro.cro import CRO
from cro.fitness import feature_selection
from cro.utils import load_data
from cro.report import plot_results
import time
from functools import partial
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn import datasets, ensemble
import numpy as np
if __name__ == '__main__':
"""
Example I: feature selection Classification (max auc)
https://www.kaggle.com/primaryobjects/voicegender
This database was created to identify a voice as male or female, based upon acoustic properties of the voice and speech.
The dataset consists of 3,168 recorded voice samples, collected from male and female speakers. It contains 20 features and I added 10 noisy!
"""
## ------------------------------------------------------
## Parameters initialization
## ------------------------------------------------------
Ngen = 20 # Number of generations
N = 10 # MxN: reef size
M = 10 # MxN: reef size
Fb = 0.8 # Broadcast prob.
Fa = 0.2 # Asexual reproduction prob.
Fd = 0.1 # Fraction of the corals to be eliminated in the depredation operator.
r0 = 0.6 # Free/total initial proportion
k = 3 # Number of opportunities for a new coral to settle in the reef
Pd = 0.1 # Depredation prob.
opt= 'max' # flag: 'max' for maximizing and 'min' for minimizing
## ------------------------------------------------------
dataset = load_data('voice')
L = dataset.data.shape[1] # number of features
X = dataset.data
y = dataset.target
clf = KNeighborsClassifier(2)
fitness_coral = partial(feature_selection, X=X, y=y, model=clf,
get_prediction = lambda clf, X: clf.predict_proba(X)[:, 1],
metric=roc_auc_score)
start = time.time()
cro = CRO(Ngen, N, M, Fb, Fa, Fd, r0, k, Pd, fitness_coral, opt, L, seed=13, verbose=True)
(REEF, REEFpob, REEFfitness, ind_best, Bestfitness, Meanfitness) = cro.fit(X, y, clf)
plot_results(Bestfitness, Meanfitness, cro, filename=None)
print("Example I: feature selection Classification (max auc): ", time.time() - start, "seconds.")
names = np.array(dataset.feature_names)
print(names[REEFpob[ind_best, :]>0])
"""
Example II: feature selection, regression (min mse)
"""
## ------------------------------------------------------
## Parameters initialization
## ------------------------------------------------------
Ngen = 25 # Number of generations
N = 10 # MxN: reef size
M = 10 # MxN: reef size
Fb = 0.8 # Broadcast prob.
Fa = 0.2 # Asexual reproduction prob.
Fd = 0.1 # Fraction of the corals to be eliminated in the depredation operator.
r0 = 0.7 # Free/total initial proportion
k = 3 # Number of opportunities for a new coral to settle in the reef
Pd = 0.1 # Depredation prob.
opt= 'min' # flag: 'max' for maximizing and 'min' for minimizing
## ------------------------------------------------------
dataset = datasets.load_boston()
L = dataset.data.shape[1] # number of features
X = dataset.data
y = dataset.target
params = {'n_estimators': 60, 'max_depth': 4, 'min_samples_split': 2}
gbr = ensemble.GradientBoostingRegressor(**params)
fitness_coral = partial(feature_selection, X=X, y=y, model=gbr,
get_prediction=lambda gbr, X: gbr.predict(X),
metric=mean_squared_error)
start = time.time()
cro = CRO(Ngen, N, M, Fb, Fa, Fd, r0, k, Pd, fitness_coral, opt, L, seed=13, verbose=True)
(REEF, REEFpob, REEFfitness, ind_best, Bestfitness, Meanfitness) = cro.fit(X, y, gbr)
print("Example II: feature selection, regression (min mse): ", time.time() - start, "seconds.")
plot_results(Bestfitness, Meanfitness, cro, filename=None)
names = np.array(['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT'])
print(names[REEFpob[ind_best, :]>0])
| mit |
Srisai85/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
MartinDelzant/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
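# Illustrative usage sketch (not part of the library; `alpha` and `gamma` are
# arbitrary example values):
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = np.sort(rng.uniform(0, 5, (40, 1)), axis=0)
# >>> y = np.sin(X).ravel()
# >>> model = KernelRidge(kernel="rbf", alpha=0.1, gamma=0.5).fit(X, y)
# >>> model.predict(X[:2]).shape
# (2,)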
| bsd-3-clause |
davmre/treegp | plot.py | 1 | 1762 | import numpy as np
import matplotlib.pyplot as plt
# Registers the '3d' projection used by interpolate_surface below (needed on
# older matplotlib versions).
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
def predict_1d (
gp,
num_steps = 100,
x_min = None,
x_max = None,
):
"""
Plot a gp's prediction with error bars of 2*std.
"""
    if x_min is None: x_min = min( x[0] for x in gp.X )
    if x_max is None: x_max = max( x[0] for x in gp.X )
x_max = float( x_max )
x_min = float( x_min )
predict_x = np.reshape(np.linspace( x_min, x_max, num_steps ), (-1, 1))
mean = gp.predict( predict_x )
variance = gp.variance( predict_x )
plt.figure()
z = zip( predict_x, mean.flat, variance )
data = [
(x,y,max(v,0.0)) for (x,y,v) in z
]
data.sort( key = lambda d: d[0] ) # sort on X axis
# plot mean predictions
predict_x = [ d[0] for d in data ]
predict_y = np.array( [ d[1] for d in data ] )
plt.plot( predict_x, predict_y, color='k', linestyle=':' )
# plot error bars
sd = np.sqrt( np.array( [ d[2] for d in data ] ) )
var_x = np.concatenate((predict_x, predict_x[::-1]))
var_y = np.concatenate((predict_y + 2.0 * sd, (predict_y - 2.0 * sd)[::-1]))
p = plt.fill(var_x, var_y, edgecolor='w', facecolor='#d3d3d3')
def interpolate_surface(gp, X, y):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xmin = np.min(X, 0)
xmax = np.max(X, 0)
u = np.linspace(xmin[0], xmax[0], 20)
v = np.linspace(xmin[1], xmax[1], 20)
xc = np.outer(np.ones((20,)), u)
yc = np.outer(v, np.ones((20,)))
k = np.zeros(xc.shape)
for i in range(xc.shape[0]):
for j in range(xc.shape[1]):
k[i,j] = gp.predict((xc[i,j], yc[i,j]))
#print xmin, xmax
#print u, v
#print x, y, k
ax.plot_surface(xc, yc, k, color='b')
plt.show()
| gpl-3.0 |
muntasirsyed/intellij-community | python/helpers/pydev/pydevconsole.py | 41 | 15763 | from _pydev_imps._pydev_thread import start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from pydevconsole_code_for_ironpython import InteractiveConsole
from code import compile_command
from code import InteractiveInterpreter
import os
import sys
import _pydev_threading as threading
import traceback
import fix_getpass
fix_getpass.fixGetpass()
import pydevd_vars
from pydev_imports import Exec, _queue
try:
import __builtin__
except:
import builtins as __builtin__
try:
False
True
except NameError: # version < 2.3 -- didn't have the True/False builtins
import __builtin__
setattr(__builtin__, 'True', 1) #Python 3.0 does not accept __builtin__.True = 1 in its syntax
setattr(__builtin__, 'False', 0)
from pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from pydev_console_utils import CodeFragment
IS_PYTHON_3K = False
IS_PY24 = False
try:
if sys.version_info[0] == 3:
IS_PYTHON_3K = True
elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
IS_PY24 = True
except:
#That's OK, not all versions of python have sys.version_info
pass
class Command:
def __init__(self, interpreter, code_fragment):
"""
:type code_fragment: CodeFragment
:type interpreter: InteractiveConsole
"""
self.interpreter = interpreter
self.code_fragment = code_fragment
self.more = None
def symbol_for_fragment(code_fragment):
if code_fragment.is_single_line:
symbol = 'single'
else:
symbol = 'exec' # Jython doesn't support this
return symbol
symbol_for_fragment = staticmethod(symbol_for_fragment)
def run(self):
text = self.code_fragment.text
symbol = self.symbol_for_fragment(self.code_fragment)
self.more = self.interpreter.runsource(text, '<input>', symbol)
try:
try:
execfile #Not in Py3k
except NameError:
from pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from pydev_umd import runfile, _set_globals_function
try:
import builtins
builtins.runfile = runfile
except:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, host, client_port, mainThread):
BaseInterpreterInterface.__init__(self, mainThread)
self.client_port = client_port
self.host = host
self.namespace = {}
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def doAddExec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def getNamespace(self):
return self.namespace
def getCompletions(self, text, act_tok):
try:
from _pydev_completer import Completer
completer = Completer(self.namespace, None)
return completer.complete(act_tok)
except:
import traceback
traceback.print_exc()
return []
def close(self):
sys.exit(0)
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def process_exec_queue(interpreter):
from pydev_ipython.inputhook import get_inputhook, set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
from pydev_import_hook import import_hook_manager
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
import_hook_manager.add_module_name("matplotlib", lambda: activate_matplotlib(interpreter.enableGui))
# enable_gui_function in activate_matplotlib should be called in main thread. That's why we call
# interpreter.enableGui which put it into the interpreter's exec_queue and executes it in the main thread.
import_hook_manager.add_module_name("pylab", activate_pylab)
import_hook_manager.add_module_name("pyplot", activate_pyplot)
while 1:
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
import traceback;traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
if callable(code_fragment):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
more = interpreter.addExec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
except:
type, value, tb = sys.exc_info()
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def DoExit(*args):
'''
We have to override the exit because calling sys.exit will only actually exit the main thread,
and as we're in a Xml-rpc server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def handshake():
return "PyCharm"
#=======================================================================================================================
# StartServer
#=======================================================================================================================
def start_server(host, port, interpreter):
if port == 0:
host = ''
#I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse.
from pydev_imports import SimpleXMLRPCServer as XMLRPCServer #@Reimport
try:
if IS_PY24:
server = XMLRPCServer((host, port), logRequests=False)
else:
server = XMLRPCServer((host, port), logRequests=False, allow_none=True)
except:
sys.stderr.write('Error starting server with host: %s, port: %s, client_port: %s\n' % (host, port, interpreter.client_port))
raise
# Tell UMD the proper default namespace
_set_globals_function(interpreter.getNamespace)
server.register_function(interpreter.execLine)
server.register_function(interpreter.execMultipleLines)
server.register_function(interpreter.getCompletions)
server.register_function(interpreter.getFrame)
server.register_function(interpreter.getVariable)
server.register_function(interpreter.changeVariable)
server.register_function(interpreter.getDescription)
server.register_function(interpreter.close)
server.register_function(interpreter.interrupt)
server.register_function(handshake)
server.register_function(interpreter.connectToDebugger)
server.register_function(interpreter.hello)
server.register_function(interpreter.getArray)
server.register_function(interpreter.evaluate)
# Functions for GUI main loop integration
server.register_function(interpreter.enableGui)
if port == 0:
(h, port) = server.socket.getsockname()
print(port)
print(interpreter.client_port)
sys.stderr.write(interpreter.get_greeting_msg())
sys.stderr.flush()
server.serve_forever()
return server
def StartServer(host, port, client_port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = DoExit
interpreter = InterpreterInterface(host, client_port, threading.currentThread())
start_new_thread(start_server,(host, port, interpreter))
process_exec_queue(interpreter)
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
setattr(__builtin__, 'interpreter', interpreterInterface)
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#===============================================================================
# Debugger integration
#===============================================================================
def exec_code(code, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
res = interpreterInterface.needMore(code)
if res:
return True
interpreterInterface.addExec(code)
return False
class ConsoleWriter(InteractiveInterpreter):
skip = 0
def __init__(self, locals=None):
InteractiveInterpreter.__init__(self, locals)
def write(self, data):
#if (data.find("global_vars") == -1 and data.find("pydevd") == -1):
if self.skip > 0:
self.skip -= 1
else:
if data == "Traceback (most recent call last):\n":
self.skip = 1
sys.stderr.write(data)
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred."""
        #Override to avoid using sys.excepthook PY-12600
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
sys.stderr.write(''.join(list))
def showtraceback(self):
"""Display the exception that just occurred."""
        #Override to avoid using sys.excepthook PY-12600
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
lines = traceback.format_list(tblist)
if lines:
lines.insert(0, "Traceback (most recent call last):\n")
lines.extend(traceback.format_exception_only(type, value))
finally:
tblist = tb = None
sys.stderr.write(''.join(lines))
def consoleExec(thread_id, frame_id, expression):
"""returns 'False' in case expression is partially correct
"""
frame = pydevd_vars.findFrame(thread_id, frame_id)
expression = str(expression.replace('@LINE@', '\n'))
#Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
#(Names not resolved in generator expression in method)
#See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals
if IPYTHON:
return exec_code(CodeFragment(expression), updated_globals, frame.f_locals)
interpreter = ConsoleWriter()
try:
code = compile_command(expression)
except (OverflowError, SyntaxError, ValueError):
# Case 1
interpreter.showsyntaxerror()
return False
if code is None:
# Case 2
return True
#Case 3
try:
Exec(code, updated_globals, frame.f_locals)
except SystemExit:
raise
except:
interpreter.showtraceback()
return False
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn()
port, client_port = sys.argv[1:3]
import pydev_localhost
if int(port) == 0 and int(client_port) == 0:
(h, p) = pydev_localhost.get_socket_name()
client_port = p
pydevconsole.StartServer(pydev_localhost.get_localhost(), int(port), int(client_port))
| apache-2.0 |
akrherz/iem | htdocs/plotting/auto/scripts200/p219.py | 1 | 9579 | """Visualization of TAFs"""
import datetime
# third party
import requests
import pandas as pd
import numpy as np
import matplotlib.patheffects as PathEffects
from matplotlib.patches import Rectangle
from metpy.units import units
from metpy.calc import wind_components
from pandas.io.sql import read_sql
from pyiem.plot import figure
from pyiem.util import get_autoplot_context, get_dbconn, utc
from pyiem.exceptions import NoDataFound
VIS = "visibility"
TEXTARGS = {
"fontsize": 12,
"color": "k",
"ha": "center",
"va": "center",
"zorder": 3,
}
PE = [PathEffects.withStroke(linewidth=5, foreground="white")]
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc["cache"] = 600
desc[
"description"
] = """
    This app generates infographics for Terminal Aerodrome Forecasts (TAF).
You need not provide an exact valid timestamp for the TAF issuance, the
app will search backwards in time up to 24 hours to find the nearest
issuance stored in the database.
"""
desc["arguments"] = [
dict(
type="text",
default="KDSM",
name="station",
label="Select station to plot:",
),
dict(
type="datetime",
name="valid",
default=utc().strftime("%Y/%m/%d %H%M"),
label="TAF Issuance/Valid Timestamp (UTC Timezone):",
min="1995/01/01 0000",
),
]
return desc
def get_text(product_id):
"""get the raw text."""
text = "Text Unavailable, Sorry."
uri = f"https://mesonet.agron.iastate.edu/api/1/nwstext/{product_id}"
try:
req = requests.get(uri, timeout=5)
if req.status_code == 200:
text = req.content.decode("ascii", "ignore").replace("\001", "")
text = "\n".join(text.replace("\r", "").split("\n")[5:])
except Exception:
pass
return text
def taf_search(pgconn, station, valid):
"""Go look for a nearest in time TAF."""
cursor = pgconn.cursor()
cursor.execute(
"SELECT valid at time zone 'UTC' from taf "
"WHERE station = %s and valid > %s and "
"valid < %s ORDER by valid DESC",
(station, valid - datetime.timedelta(hours=24), valid),
)
if cursor.rowcount == 0:
return None
return cursor.fetchone()[0].replace(tzinfo=datetime.timezone.utc)
def compute_flight_condition(row):
"""What's our status."""
# TEMPO may not address sky or vis
if row["is_tempo"] and (not row["skyc"] or pd.isna(row[VIS])):
return None
level = 10000
if "OVC" in row["skyc"]:
level = row["skyl"][row["skyc"].index("OVC")]
if level == 10000 and "BKN" in row["skyc"]:
level = row["skyl"][row["skyc"].index("BKN")]
if row[VIS] > 5 and level > 3000:
return "VFR"
if level < 500 or row[VIS] < 1:
return "LIFR"
if level < 1000 or row[VIS] < 3:
return "IFR"
if level <= 3000 or row[VIS] <= 5:
return "MVFR"
return "UNK"
def plotter(fdict):
"""Go"""
ctx = get_autoplot_context(fdict, get_description())
valid = ctx["valid"].replace(tzinfo=datetime.timezone.utc)
pgconn = get_dbconn("asos")
def fetch(ts):
"""Getme data."""
return read_sql(
"SELECT f.*, t.product_id from taf t JOIN taf_forecast f on "
"(t.id = f.taf_id) WHERE t.station = %s and t.valid = %s "
"ORDER by f.valid ASC",
pgconn,
params=(ctx["station"], ts),
index_col="valid",
)
df = fetch(valid)
if df.empty:
valid = taf_search(pgconn, ctx["station"], valid)
if valid is None:
raise NoDataFound("TAF data was not found!")
df = fetch(valid)
df = df.fillna(np.nan)
df["next_valid"] = (
df.reset_index().shift(-1)["valid"].values - df.index.values
)
product_id = df.iloc[0]["product_id"]
title = (
f"{ctx['station']} Terminal Aerodome Forecast by NWS "
f"{product_id[14:17]}\n"
f"Valid: {valid.strftime('%-d %b %Y %H:%M UTC')}"
)
fig = figure(title=title)
###
text = get_text(product_id)
res = fig.text(0.43, 0.01, text.strip(), va="bottom", fontsize=12)
bbox = res.get_window_extent(fig.canvas.get_renderer())
figbbox = fig.get_window_extent()
# one-two line TAFs cause the legend to go off-screen
yndc = max([bbox.y1 / figbbox.y1, 0.13])
# Create the main axes that will hold all our hackery
ax = fig.add_axes([0.08, yndc + 0.05, 0.9, 0.9 - yndc - 0.05])
fig.text(0.015, 0.3, "Cloud Coverage & Level", rotation=90)
df["u"], df["v"] = [
x.m
for x in wind_components(
units("knot") * df["sknt"].values,
units("degree") * df["drct"].values,
)
]
df["ws_u"], df["ws_v"] = [
x.m
for x in wind_components(
units("knot") * df["ws_sknt"].values,
units("degree") * df["ws_drct"].values,
)
]
# Initialize a fcond with string type
df["fcond"] = ""
sz = len(df.index)
clevels = []
clevelx = []
for valid0, row in df.iterrows():
valid = valid0
if not pd.isna(row["end_valid"]):
valid = valid + (row["end_valid"] - valid) / 2
# Between 1-3 plot the clouds
for j, skyc in enumerate(row["skyc"]):
level = min([3200, row["skyl"][j]]) / 1600 + 1
if j + 1 == len(row["skyc"]):
clevelx.append(valid)
clevels.append(level)
ax.text(valid, level, skyc, **TEXTARGS).set_path_effects(PE)
# At 0.9 present weather
delta = row["next_valid"]
rotation = 0
if not pd.isna(delta) and delta < datetime.timedelta(hours=2):
rotation = 45
ax.text(
valid,
0.9,
"\n".join(row["presentwx"]),
rotation=rotation,
**TEXTARGS,
).set_path_effects(PE)
# Plot wind as text string
if not pd.isna(row["ws_sknt"]):
ax.text(
valid,
3.8 + (0.5 if row["v"] > 0 else 0.5),
"WS%g" % (row["ws_sknt"],),
ha="center",
fontsize=TEXTARGS["fontsize"],
va="top" if row["v"] < 0 else "bottom",
color="r",
).set_path_effects(PE)
text = f"{row['sknt']:.0f}"
if not pd.isna(row["gust"]) and row["gust"] > 0:
text += f"G{row['gust']:.0f}"
if not pd.isna(row["sknt"]):
ax.text(
valid,
3.8 + (0.35 if row["v"] > 0 else 0.35),
f"{text}KT",
ha="center",
fontsize=TEXTARGS["fontsize"],
color=TEXTARGS["color"],
va="top" if row["v"] < 0 else "bottom",
).set_path_effects(PE)
df.at[valid0, "fcond"] = compute_flight_condition(row)
# At 3.25 plot the visibility
if not pd.isna(row[VIS]):
pltval = f"{row['visibility']:g}"
if row["visibility"] > 6:
pltval = "6+"
ax.text(valid, 3.25, pltval, **TEXTARGS).set_path_effects(PE)
if clevels:
ax.plot(clevelx, clevels, linestyle=":", zorder=2)
# Between 3.5-4.5 plot the wind arrows
ax.barbs(
df.index.values,
[3.8] * sz,
df["u"].values,
df["v"].values,
zorder=3,
color="k",
)
ax.barbs(
df.index.values,
[3.8] * sz,
df["ws_u"].values,
df["ws_v"].values,
zorder=4,
color="r",
)
padding = datetime.timedelta(minutes=60)
ax.set_xlim(df.index.min() - padding, df.index.max() + padding)
ax.set_yticks([0.9, 1.5, 2, 2.5, 3, 3.25, 3.8])
ax.set_yticklabels(
[
"WX",
"800ft",
"1600ft",
"2400ft",
"3200+ft",
"Vis (mile)",
"Wind (KT)",
]
)
ax.set_ylim(0.8, 4.5)
for y in [1, 3.125, 3.375]:
ax.axhline(
y,
color="blue",
lw=0.5,
)
colors = {
"UNK": "#EEEEEE",
"VFR": "green",
"MVFR": "blue",
"IFR": "red",
"LIFR": "magenta",
}
# Colorize things by flight condition
xs = df.index.to_list()
xs[0] = xs[0] - padding
xs.append(df.index.max() + padding)
previous = "VFR"
for i, val in enumerate(df["fcond"].values):
if val is None:
val = previous
previous = val
ax.axvspan(
xs[i],
xs[i + 1],
fc=colors.get(val, "white"),
ec="None",
alpha=0.5,
zorder=2,
)
rects = []
for fcond in colors:
rects.append(Rectangle((0, 0), 1, 1, fc=colors[fcond], alpha=0.5))
ax.legend(
rects,
colors.keys(),
ncol=3,
loc="upper left",
fontsize=14,
bbox_to_anchor=(0.0, -0.04),
fancybox=True,
shadow=True,
)
# Need to get rid of timezones
df = df.reset_index()
for col in ["valid", "end_valid"]:
# some rows could be NaN
df[col] = df[~pd.isna(df[col])][col].apply(
lambda x: x.strftime("%Y-%m-%d %H:%M")
)
return fig, df.drop("next_valid", axis=1)
if __name__ == "__main__":
plotter(dict(station="KMCK", valid="2021-07-06 1606"))
| mit |
akrherz/iem | htdocs/request/maxcsv.py | 1 | 18372 | """Provide some CSV Files
first four columns need to be
ID,Station,Latitude,Longitude
"""
import datetime
import re
import sys
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
# third party
import requests
import ephem
import pytz
import pandas as pd
from pandas.io.sql import read_sql
from paste.request import parse_formvars
from pyiem.util import get_dbconn, utc
# DOT plows
# RWIS sensor data
# River gauges
# Ag data (4" soil temps)
# Moon
def figurePhase(p1, p2):
""" Return a string of the moon phase! """
if p2 < p1: # Waning!
if p1 < 0.1:
return "New Moon"
if p1 < 0.4:
return "Waning Crescent"
if p1 < 0.6:
return "Last Quarter"
if p1 < 0.9:
return "Waning Gibbous"
return "Full Moon"
if p1 < 0.1:
return "New Moon"
if p1 < 0.4:
return "Waxing Crescent"
if p1 < 0.6:
return "First Quarter"
if p1 < 0.9:
return "Waxing Gibbous"
return "Full Moon"
def do_moon(lon, lat):
"""Moon fun."""
moon = ephem.Moon()
obs = ephem.Observer()
obs.lat = str(lat)
obs.long = str(lon)
obs.date = utc().strftime("%Y/%m/%d %H:%M")
r1 = obs.next_rising(moon).datetime().replace(tzinfo=datetime.timezone.utc)
p1 = moon.moon_phase
obs.date = r1.strftime("%Y/%m/%d %H:%M")
s1 = (
obs.next_setting(moon).datetime().replace(tzinfo=datetime.timezone.utc)
)
# Figure out the next rise time
obs.date = s1.strftime("%Y/%m/%d %H:%M")
r2 = obs.next_rising(moon).datetime().replace(tzinfo=datetime.timezone.utc)
p2 = moon.moon_phase
obs.date = r2.strftime("%Y/%m/%d %H:%M")
s2 = (
obs.next_setting(moon).datetime().replace(tzinfo=datetime.timezone.utc)
)
label = figurePhase(p1, p2)
# Figure out the timezone
cursor = get_dbconn("mesosite").cursor()
cursor.execute(
"select tzid from tz_world WHERE "
"st_contains(geom, st_setsrid(ST_Point(%s, %s), 4326))",
(lon, lat),
)
if cursor.rowcount == 0:
tzid = "UTC"
else:
tzid = cursor.fetchone()[0]
tz = ZoneInfo(tzid)
return pd.DataFrame(
{
"longitude": lon,
"latitude": lat,
"moon_rise_date": r1.astimezone(tz).strftime("%Y/%m/%d"),
"moon_rise_time": r1.astimezone(tz).strftime("%-I:%M %P"),
"moon_set_date": s1.astimezone(tz).strftime("%Y/%m/%d"),
"moon_set_time": s1.astimezone(tz).strftime("%-I:%M %P"),
"percent_illum_at_rise": round(p1 * 100, 4),
"phase": label,
"next_moon_rise_date": r2.astimezone(tz).strftime("%Y/%m/%d"),
"next_moon_rise_time": r2.astimezone(tz).strftime("%-I:%M %P"),
"next_moon_set_date": s2.astimezone(tz).strftime("%Y/%m/%d"),
"next_moon_set_time": s2.astimezone(tz).strftime("%-I:%M %P"),
"next_percent_illum_at_rise": round(p2 * 100, 4),
"timezone": tzid,
},
index=[0],
)
def do_iaroadcond():
"""Iowa DOT Road Conditions as dots"""
pgconn = get_dbconn("postgis")
df = read_sql(
"""
select b.idot_id as locationid,
replace(b.longname, ',', ' ') as locationname,
ST_y(ST_transform(ST_centroid(b.geom),4326)) as latitude,
ST_x(ST_transform(ST_centroid(b.geom),4326)) as longitude, cond_code
from roads_base b JOIN roads_current c on (c.segid = b.segid)
""",
pgconn,
)
return df
def do_webcams(network):
"""direction arrows"""
pgconn = get_dbconn("mesosite")
df = read_sql(
"""
select cam as locationid, w.name as locationname, st_y(geom) as latitude,
st_x(geom) as longitude, drct
from camera_current c JOIN webcams w on (c.cam = w.id)
WHERE c.valid > (now() - '30 minutes'::interval) and w.network = %s
""",
pgconn,
params=(network,),
)
return df
def do_iowa_azos(date, itoday=False):
"""Dump high and lows for Iowa ASOS + AWOS """
pgconn = get_dbconn("iem")
df = read_sql(
f"""
select id as locationid, n.name as locationname, st_y(geom) as latitude,
st_x(geom) as longitude, s.day, s.max_tmpf::int as high,
s.min_tmpf::int as low, coalesce(pday, 0) as precip
from stations n JOIN summary_{date.year} s on (n.iemid = s.iemid)
WHERE n.network in ('IA_ASOS', 'AWOS') and s.day = %s
""",
pgconn,
params=(date,),
index_col="locationid",
)
if itoday:
# Additionally, piggy back rainfall totals
df2 = read_sql(
"""
SELECT id as station,
sum(phour) as precip720,
sum(case when valid >= (now() - '168 hours'::interval)
then phour else 0 end) as precip168,
sum(case when valid >= (now() - '72 hours'::interval)
then phour else 0 end) as precip72,
sum(case when valid >= (now() - '48 hours'::interval)
then phour else 0 end) as precip48,
sum(case when valid >= (now() - '24 hours'::interval)
then phour else 0 end) as precip24
from hourly h JOIN stations t on (h.iemid = t.iemid)
where t.network in ('IA_ASOS', 'AWOS')
and valid >= now() - '720 hours'::interval
and phour > 0.005 GROUP by id
""",
pgconn,
index_col="station",
)
for col in [
"precip24",
"precip48",
"precip72",
"precip168",
"precip720",
]:
df[col] = df2[col]
# make sure the new column is >= precip
df.loc[df[col] < df["precip"], col] = df["precip"]
df.reset_index(inplace=True)
return df
def do_iarwis():
"""Dump RWIS data"""
pgconn = get_dbconn("iem")
df = read_sql(
"""
select id as locationid, n.name as locationname, st_y(geom) as latitude,
st_x(geom) as longitude, tsf0 as pavetmp1, tsf1 as pavetmp2,
tsf2 as pavetmp3, tsf3 as pavetmp4
from stations n JOIN current s on (n.iemid = s.iemid)
WHERE n.network in ('IA_RWIS', 'WI_RWIS', 'IL_RWIS') and
s.valid > (now() - '2 hours'::interval)
""",
pgconn,
)
# Compute simple average in whole degree F
df["paveavg"] = (
df[["pavetmp1", "pavetmp2", "pavetmp3", "pavetmp4"]]
.mean(axis=1)
.map(lambda x: "%.0f" % x if not pd.isna(x) else "")
)
return df
def do_ahps_obs(nwsli):
"""Create a dataframe with AHPS river stage and CFS information"""
pgconn = get_dbconn("hml")
cursor = pgconn.cursor()
# Get metadata
cursor.execute(
"""
SELECT name, st_x(geom), st_y(geom), tzname from stations
where id = %s and network ~* 'DCP'
""",
(nwsli,),
)
row = cursor.fetchone()
latitude = row[2]
longitude = row[1]
stationname = row[0]
tzinfo = pytz.timezone(row[3])
# Figure out which keys we have
cursor.execute(
"""
with obs as (
select distinct key from hml_observed_data where station = %s
and valid > now() - '3 days'::interval)
SELECT k.id, k.label from hml_observed_keys k JOIN obs o on (k.id = o.key)
""",
(nwsli,),
)
if cursor.rowcount == 0:
return "NO DATA"
plabel = cursor.fetchone()[1]
slabel = cursor.fetchone()[1]
df = read_sql(
"""
WITH primaryv as (
SELECT valid, value from hml_observed_data WHERE station = %s
and key = get_hml_observed_key(%s) and valid > now() - '1 day'::interval
), secondaryv as (
SELECT valid, value from hml_observed_data WHERE station = %s
and key = get_hml_observed_key(%s) and valid > now() - '1 day'::interval
)
SELECT p.valid at time zone 'UTC' as valid,
p.value as primary_value, s.value as secondary_value,
'O' as type
from primaryv p LEFT JOIN secondaryv s ON (p.valid = s.valid)
WHERE p.valid > (now() - '72 hours'::interval)
ORDER by p.valid DESC
""",
pgconn,
params=(nwsli, plabel, nwsli, slabel),
index_col=None,
)
sys.stderr.write(str(plabel))
sys.stderr.write(str(slabel))
df["locationid"] = nwsli
df["locationname"] = stationname
df["latitude"] = latitude
df["longitude"] = longitude
df["Time"] = (
df["valid"]
.dt.tz_localize(pytz.UTC)
.dt.tz_convert(tzinfo)
.dt.strftime("%m/%d/%Y %H:%M")
)
df[plabel] = df["primary_value"]
df[slabel] = df["secondary_value"]
# we have to do the writing from here
res = "Observed Data:,,\n"
res += "|Date|,|Stage|,|--Flow-|\n"
odf = df[df["type"] == "O"]
for _, row in odf.iterrows():
res += "%s,%.2fft,%.1fkcfs\n" % (
row["Time"],
row["Stage[ft]"],
row["Flow[kcfs]"],
)
return res
def do_ahps_fx(nwsli):
"""Create a dataframe with AHPS river stage and CFS information"""
pgconn = get_dbconn("hml")
cursor = pgconn.cursor()
# Get metadata
cursor.execute(
"""
SELECT name, st_x(geom), st_y(geom), tzname from stations
where id = %s and network ~* 'DCP'
""",
(nwsli,),
)
row = cursor.fetchone()
latitude = row[2]
longitude = row[1]
stationname = row[0]
tzinfo = pytz.timezone(row[3])
# Get the last forecast
cursor.execute(
"""
select id, forecast_sts at time zone 'UTC',
generationtime at time zone 'UTC', primaryname, primaryunits,
secondaryname, secondaryunits
from hml_forecast where station = %s
and generationtime > now() - '7 days'::interval
ORDER by issued DESC LIMIT 1
""",
(nwsli,),
)
row = cursor.fetchone()
primaryname = row[3]
generationtime = row[2]
primaryunits = row[4]
secondaryname = row[5]
secondaryunits = row[6]
y = "{}".format(generationtime.year)
# Get the latest forecast
df = read_sql(
"""
SELECT valid at time zone 'UTC' as valid,
primary_value, secondary_value, 'F' as type from
hml_forecast_data_"""
+ y
+ """ WHERE hml_forecast_id = %s
ORDER by valid ASC
""",
pgconn,
params=(row[0],),
index_col=None,
)
# Get the obs
plabel = "{}[{}]".format(primaryname, primaryunits)
slabel = "{}[{}]".format(secondaryname, secondaryunits)
sys.stderr.write(str(primaryname))
sys.stderr.write(str(secondaryname))
df["locationid"] = nwsli
df["locationname"] = stationname
df["latitude"] = latitude
df["longitude"] = longitude
df["Time"] = (
df["valid"]
.dt.tz_localize(pytz.UTC)
.dt.tz_convert(tzinfo)
.dt.strftime("%m/%d/%Y %H:%M")
)
df[plabel] = df["primary_value"]
df[slabel] = df["secondary_value"]
# we have to do the writing from here
res = "Forecast Data (Issued %s UTC):,\n" % (
generationtime.strftime("%m-%d-%Y %H:%M:%S"),
)
res += "|Date|,|Stage|,|--Flow-|\n"
odf = df[df["type"] == "F"]
for _, row in odf.iterrows():
res += "%s,%.2fft,%.1fkcfs\n" % (
row["Time"],
row["Stage[ft]"],
row["Flow[kcfs]"],
)
return res
def feet(val, suffix="'"):
"""Make feet indicator"""
if pd.isnull(val) or val == "":
return ""
return "%.1f%s" % (val, suffix)
def do_ahps(nwsli):
"""Create a dataframe with AHPS river stage and CFS information"""
pgconn = get_dbconn("hml")
cursor = pgconn.cursor()
# Get metadata
cursor.execute(
"""
SELECT name, st_x(geom), st_y(geom), tzname from stations
where id = %s and network ~* 'DCP'
""",
(nwsli,),
)
row = cursor.fetchone()
latitude = row[2]
longitude = row[1]
stationname = row[0].replace(",", " ")
tzinfo = pytz.timezone(row[3])
# Get the last forecast
cursor.execute(
"""
select id, forecast_sts at time zone 'UTC',
generationtime at time zone 'UTC', primaryname, primaryunits,
secondaryname, secondaryunits
from hml_forecast where station = %s
and generationtime > now() - '7 days'::interval
ORDER by issued DESC LIMIT 1
""",
(nwsli,),
)
if cursor.rowcount == 0:
return "NO DATA"
row = cursor.fetchone()
generationtime = row[2]
y = "{}".format(generationtime.year)
# Figure out which keys we have
cursor.execute(
"""
with obs as (
select distinct key from hml_observed_data where station = %s
and valid > now() - '3 days'::interval)
SELECT k.id, k.label from hml_observed_keys k JOIN obs o on (k.id = o.key)
""",
(nwsli,),
)
if cursor.rowcount == 0:
return "NO DATA"
lookupkey = 14
for _row in cursor:
if _row[1].find("[ft]") > 0:
lookupkey = _row[0]
break
# get observations
odf = read_sql(
"""
SELECT valid at time zone 'UTC' as valid,
value from hml_observed_data WHERE station = %s
and key = %s and valid > now() - '3 day'::interval
and extract(minute from valid) = 0
ORDER by valid DESC
""",
pgconn,
params=(nwsli, lookupkey),
index_col=None,
)
# hoop jumping to get a timestamp in the local time of this sensor
# see akrherz/iem#187
odf["obtime"] = (
odf["valid"]
.dt.tz_localize(pytz.UTC)
.dt.tz_convert(tzinfo)
.dt.strftime("%a. %-I %p")
)
# Get the latest forecast
df = read_sql(
"""
SELECT valid at time zone 'UTC' as valid,
primary_value, secondary_value, 'F' as type from
hml_forecast_data_"""
+ y
+ """ WHERE hml_forecast_id = %s
ORDER by valid ASC
""",
pgconn,
params=(row[0],),
index_col=None,
)
# Get the obs
# plabel = "{}[{}]".format(primaryname, primaryunits)
# slabel = "{}[{}]".format(secondaryname, secondaryunits)
odf.rename({"value": "obstage"}, axis=1, inplace=True)
df = df.join(odf[["obtime", "obstage"]], how="outer")
# hoop jumping to get a timestamp in the local time of this sensor
# see akrherz/iem#187
df["forecasttime"] = (
df["valid"]
.dt.tz_localize(pytz.UTC)
.dt.tz_convert(tzinfo)
.dt.strftime("%a. %-I %p")
)
df["forecaststage"] = df["primary_value"]
# df[slabel] = df['secondary_value']
# we have to do the writing from here
res = (
"locationid,locationname,latitude,longitude,obtime,obstage,"
"obstage2,obstagetext,forecasttime,forecaststage,forecaststage1,"
"forecaststage2,forecaststage3,highestvalue,highestvalue2,"
"highestvaluedate\n"
)
res += ",,,,,,,,,,,,,,,\n,,,,,,,,,,,,,,,\n"
maxrow = df.sort_values("forecaststage", ascending=False).iloc[0]
for idx, row in df.iterrows():
fs = (
row["forecaststage"] if not pd.isnull(row["forecaststage"]) else ""
)
res += ("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n") % (
nwsli if idx == 0 else "",
stationname if idx == 0 else "",
latitude if idx == 0 else "",
longitude if idx == 0 else "",
row["obtime"],
row["obstage"],
feet(row["obstage"]),
"Unknown" if idx == 0 else "",
(row["forecasttime"] if row["forecasttime"] != "NaT" else ""),
feet(row["forecaststage"], "ft"),
fs,
feet(row["forecaststage"]),
fs,
"" if idx > 0 else maxrow["forecaststage"],
"" if idx > 0 else feet(maxrow["forecaststage"]),
"" if idx > 0 else maxrow["forecasttime"],
)
return res
def do_uvi():
"""UVI index."""
PATTERN = re.compile(
r"(?P<c1>[A-Z\s]+)\s+(?P<s1>[A-Z][A-Z])\s+(?P<u1>\d+)\s+"
r"(?P<c2>[A-Z\s]+)\s+(?P<s2>[A-Z][A-Z])\s+(?P<u2>\d+)",
)
URL = (
"https://www.cpc.ncep.noaa.gov/"
"products/stratosphere/uv_index/bulletin.txt"
)
req = requests.get(URL, timeout=20)
rows = []
for line in req.content.decode("ascii").split("\n"):
m = PATTERN.match(line)
if not m:
continue
data = m.groupdict()
for i in ["1", "2"]:
rows.append(
{
"City": data[f"c{i}"].strip(),
"State": data[f"s{i}"].strip(),
"UVI": data[f"u{i}"].strip(),
}
)
return pd.DataFrame(rows)
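# Illustrative note (hypothetical data): each bulletin line matched by the
# regex in do_uvi() carries two city/state/UVI triples side by side, so a line
# such as
#   "ALBUQUERQUE      NM     9      LITTLE ROCK      AR     7"
# yields {"City": "ALBUQUERQUE", "State": "NM", "UVI": "9"} and
# {"City": "LITTLE ROCK", "State": "AR", "UVI": "7"}.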
def router(appname):
"""Process and return dataframe"""
# elif appname == 'iadotplows':
# df = do_iadotplows()
# elif appname == 'iariver':
# df = do_iariver()
# elif appname == 'isusm':
# df = do_isusm()
if appname.startswith("ahpsobs_"):
df = do_ahps_obs(appname[8:].upper()) # we write ourselves and exit
elif appname.startswith("ahpsfx_"):
df = do_ahps_fx(appname[7:].upper()) # we write ourselves and exit
elif appname.startswith("ahps_"):
df = do_ahps(appname[5:].upper()) # we write ourselves and exit
elif appname == "iaroadcond":
df = do_iaroadcond()
elif appname == "iarwis":
df = do_iarwis()
elif appname == "iowayesterday":
df = do_iowa_azos(datetime.date.today() - datetime.timedelta(days=1))
elif appname == "iowatoday":
df = do_iowa_azos(datetime.date.today(), True)
elif appname == "kcrgcitycam":
df = do_webcams("KCRG")
elif appname == "uvi":
df = do_uvi()
elif appname.startswith("moon"):
tokens = appname.replace(".txt", "").split("_")
df = do_moon(float(tokens[1]), float(tokens[2]))
else:
df = """ERROR, unknown report specified"""
return df
def application(environ, start_response):
"""Do Something"""
form = parse_formvars(environ)
appname = form.get("q")
res = router(appname)
start_response("200 OK", [("Content-type", "text/plain")])
if isinstance(res, pd.DataFrame):
return [res.to_csv(None, index=False).encode("ascii")]
return [res.encode("ascii")]
def test_hml():
"""Can we do it?"""
do_ahps("DBQI4")
| mit |
googleinterns/amaranth | amaranth/ml/train.py | 1 | 6282 | # Lint as: python3
"""This script is used to build and train a nutrient-prediction ML model."""
# Define imports and constants
import os
import json
from collections import defaultdict
import numpy as np
import pandas as pd
import sklearn.model_selection
import tensorflow as tf
from tensorflow import keras
import amaranth
from amaranth.ml import lib
# Directories to write files to
FDC_DATA_DIR = '../../data/fdc/' # Data set directory
MODEL_IMG_DIR = '../../docs/img/' # Model image directory
RESOURCES_DIR = '../resources/' # Project resources directory
CHROME_EXT_DIR = 'amaranth-chrome-ext/assets' # Chrome extension directory
# Fraction of data that should be used for training, validation, and testing.
# Should all sum to 1.0.
TRAIN_FRAC = 0.6
VALIDATION_FRAC = 0.2
TEST_FRAC = 0.2
# Times a token needs to appear to be in model's vocab
MIN_TOKEN_APPEARANCE = 3
# Chars to remove from dish names
DISH_NAME_FILTERS = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
def main():
print(f'Tensorflow version {tf.__version__}')
# Get data directory path
current_dir = os.path.dirname(__file__)
abs_fdc_data_dir = os.path.join(current_dir, FDC_DATA_DIR)
# Read data from disk
food = pd.read_csv(os.path.join(abs_fdc_data_dir, 'food.csv'))
nutrient = pd.read_csv(os.path.join(
abs_fdc_data_dir, 'nutrient.csv')).rename(columns={'id': 'nutrient_id'})
food_nutrient = pd.read_csv(
os.path.join(abs_fdc_data_dir, 'food_nutrient.csv'))
combined = lib.combine_dataframes('fdc_id', food, food_nutrient)
combined = lib.combine_dataframes('nutrient_id', combined, nutrient)
# Extract and format calorie data
calorie_data = lib.get_calorie_data(combined, 'kcal')
calorie_data = calorie_data[[
'description', 'data_type', 'name', 'amount', 'unit_name'
]] # Keep only relevant cols
calorie_data = lib.clean_data(calorie_data)
lib.add_calorie_labels(
calorie_data,
low_calorie_threshold=amaranth.LOW_CALORIE_THRESHOLD,
high_calorie_threshold=amaranth.HIGH_CALORIE_THRESHOLD)
# Normalize input strings
# Step 1: convert strings to lowercase
# Step 2: filter out characters present in DISH_NAME_FILTERS
# Step 3: re-combine characters back to a normal string
calorie_data['description'] = calorie_data['description'].apply(
lambda desc: desc.lower(), # Step 1
).apply(
lambda desc: [char for char in desc
if char not in DISH_NAME_FILTERS], # Step 2
).apply(
''.join, #Step 3
)
# Do some preprocessing and calculations for encoding
corpus = calorie_data['description']
vocab_size = lib.num_unique_words(corpus)
tokenized_corpus = corpus.map(lambda desc: desc.split(' '))
max_corpus_length = lib.max_sequence_length(tokenized_corpus)
# Count appearances of each word in dataset
tokenizer_cnt = defaultdict(int)
for dish_name in calorie_data['description']:
for token in dish_name.split():
tokenizer_cnt[token] += 1
# Only 'remember' words that appear at least MIN_TOKEN_APPEARANCE times
keep_tokens = []
for token, cnt in tokenizer_cnt.items():
if cnt >= MIN_TOKEN_APPEARANCE:
keep_tokens.append(token)
# Assign each token a unique number and create a dict that maps those tokens
# to their unique value
  rev_tokenizer_lst = enumerate(keep_tokens, start=1)
tokenizer_lst = [(token, idx) for idx, token in rev_tokenizer_lst]
tokenizer = dict(tokenizer_lst)
# The string 'OOV' denotes words that are out-of-vocabulary
  # It's equal to zero because the vocabulary indices above start at 1,
  # so zero is free for out-of-vocabulary words.
tokenizer['OOV'] = 0
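  # Illustrative example (hypothetical tokens): if keep_tokens were
  # ["chicken", "salad", "fried"], the mapping above would be
  # {"chicken": 1, "salad": 2, "fried": 3, "OOV": 0}, so a dish name such as
  # "fried unicorn salad" later encodes to [3, 0, 2] before padding.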
json.dump(
tokenizer,
open(os.path.join(CHROME_EXT_DIR, 'tokenizer.json'), 'w'),
separators=(',', ':'))
calorie_data['input'] = calorie_data.apply(
lambda row: [
tokenizer[token] if token in tokenizer else tokenizer['OOV']
for token in row['description'].split()
],
axis=1)
# Pad 'input' column to all be the same length for embedding input
calorie_data['input'] = calorie_data.apply(
lambda row: lib.pad_list(row['input'], max_corpus_length, 0), axis=1)
# Create model
model = keras.Sequential([
keras.layers.Embedding(
vocab_size + 1,
int((vocab_size + 1)**(1 / 4)),
input_length=max_corpus_length),
keras.layers.Flatten(),
keras.layers.Dense(32, activation='sigmoid'),
keras.layers.Dense(10, activation='sigmoid'),
keras.layers.Dense(3, activation='softmax'),
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=[
'categorical_accuracy',
keras.metrics.Precision(),
keras.metrics.Recall(),
])
# Model stats
model.summary()
model._layers = [ # Workaround for bug in keras.util.plot_model pylint: disable=protected-access
layer for layer in model._layers if not isinstance(layer, dict) # pylint: disable=protected-access
]
keras.utils.plot_model(
model,
to_file=os.path.join(current_dir, MODEL_IMG_DIR, 'model.png'),
show_layer_names=False,
show_shapes=True)
# Split dataset
train_set, test_set = sklearn.model_selection.train_test_split(
calorie_data,
train_size=TRAIN_FRAC + VALIDATION_FRAC,
test_size=TEST_FRAC)
# Train model
model.fit(
np.stack(train_set['input']),
np.stack(train_set['calorie_label']),
epochs=10,
validation_split=VALIDATION_FRAC / (TRAIN_FRAC + VALIDATION_FRAC),
callbacks=[keras.callbacks.TensorBoard()],
)
# Evaluate model
results = model.evaluate(
np.stack(test_set['input']),
np.stack(test_set['calorie_label']),
)
print('\nResults:')
print(results)
# Save test set predictions, generate confusion matrix
predictions = model.predict(np.stack(test_set['input']))
predictions = tf.argmax(predictions, axis=-1)
confusion = tf.math.confusion_matrix(
tf.argmax(np.stack(test_set['calorie_label']), axis=-1), predictions)
print('\nConfusion matrix')
print('x-axis: prediction')
print('y-axis: actual value')
print(confusion)
# Save model to file
model.save(os.path.join(current_dir, RESOURCES_DIR, 'model'))
if __name__ == '__main__':
main()
| apache-2.0 |
astocko/statsmodels | statsmodels/examples/ex_kernel_regression.py | 34 | 1785 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 09:17:40 2013
Author: Josef Perktold based on test file by George Panterov
"""
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
#import statsmodels.api as sm
#nparam = sm.nonparametric
italy_gdp = \
[8.556, 12.262, 9.587, 8.119, 5.537, 6.796, 8.638,
6.483, 6.212, 5.111, 6.001, 7.027, 4.616, 3.922,
4.688, 3.957, 3.159, 3.763, 3.829, 5.242, 6.275,
8.518, 11.542, 9.348, 8.02, 5.527, 6.865, 8.666,
6.672, 6.289, 5.286, 6.271, 7.94, 4.72, 4.357,
4.672, 3.883, 3.065, 3.489, 3.635, 5.443, 6.302,
9.054, 12.485, 9.896, 8.33, 6.161, 7.055, 8.717,
6.95]
italy_year = \
[1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951,
1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1952,
1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952,
1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1953, 1953,
1953, 1953, 1953, 1953, 1953, 1953]
italy_year = np.asarray(italy_year, float)
model = nparam.KernelReg(endog=[italy_gdp],
exog=[italy_year], reg_type='lc',
var_type='o', bw='cv_ls')
sm_bw = model.bw
R_bw = 0.1390096
sm_mean, sm_mfx = model.fit()
sm_mean2 = sm_mean[0:5]
sm_mfx = sm_mfx[0:5]
R_mean = 6.190486
sm_R2 = model.r_squared()
R_R2 = 0.1435323
npt.assert_allclose(sm_bw, R_bw, atol=1e-2)
npt.assert_allclose(sm_mean2, R_mean, atol=1e-2)
npt.assert_allclose(sm_R2, R_R2, atol=1e-2)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(italy_year, italy_gdp, 'o')
ax.plot(italy_year, sm_mean, '-')
plt.show()
| bsd-3-clause |
vigilv/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
JanSchulz/knitpy | knitpy/documents.py | 1 | 17172 | from __future__ import absolute_import, unicode_literals
import os
import tempfile
import re
from collections import OrderedDict
try:
#py3
from base64 import decodebytes
except ImportError:
# py2
from base64 import decodestring as decodebytes
from pypandoc import convert as pandoc
# Basic things from IPython
from traitlets.config.configurable import LoggingConfigurable
from traitlets import Bool, Unicode, CaselessStrEnum, List, Instance
from .py3compat import iteritems
from .utils import is_iterable, is_string
TEXT, OUTPUT, CODE, ASIS = "text", "output", "code", "asis"
IMAGE_MIMETYPE_TO_FILEEXTENSION = OrderedDict([("image/png","png"),
("image/svg+xml","svg"),
("image/jpeg","jpg"),
("application/pdf","pdf")])
IMAGE_FILEEXTENSION_TO_MIMETYPE = dict([(v,k) for k,v in iteritems(
IMAGE_MIMETYPE_TO_FILEEXTENSION)])
MARKUP_FORMAT_CONVERTER = OrderedDict([("text/markdown", "markdown"),
("text/x-markdown", "markdown"),
("text/html", "html"),
("text/latex", "latex")])
class KnitpyOutputException(Exception):
pass
# this is the intersection of what matplotlib supports (eps, pdf, pgf, png, ps, raw, rgba, svg,
# svgz) and what IPython supports ('png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf')...
_possible_image_formats = CaselessStrEnum(values=['pdf', 'png', 'svg'])
DEFAULT_FINAL_OUTPUT_FORMATS = [
{"name": "html_document", "alias": "html",
"pandoc_export_format": "html", "file_extension": "html",
"accepted_image_formats": ["png", "svg"]},
{"name": "word_document", "alias": "docx",
"pandoc_export_format": "docx", "file_extension": "docx",
"accepted_image_formats": ["png", "svg"]},
{"name": "pdf_document", "alias": "pdf",
"pandoc_export_format": "latex", "file_extension": "pdf",
"accepted_image_formats": ["pdf", "png"]},
{"name": "latex_document", "alias": "latex",
"pandoc_export_format": "latex", "file_extension": "tex",
"accepted_image_formats": ["pdf", "png"]},
]
VALID_OUTPUT_FORMAT_NAMES = [fmt["name"] for fmt in DEFAULT_FINAL_OUTPUT_FORMATS] + \
[fmt["alias"] for fmt in DEFAULT_FINAL_OUTPUT_FORMATS]
DEFAULT_OUTPUT_FORMAT_NAME = "html_document"
class FinalOutputConfiguration(LoggingConfigurable):
"""
This class holds configuration information about the final output document.
"""
name = Unicode("html_document", help="The name of this type of documents")
alias = Unicode("html", help="The alias of this type of documents")
pandoc_export_format = Unicode("html", help="The name of the pandoc export format")
file_extension = Unicode("html", help="The file extension")
keep_md = Bool(False, help="Whether to keep the temporary markdown file.")
accepted_image_formats = List(
trait=_possible_image_formats,
default_value=['png', 'svg'], # that's for html, which does not use pdf
config=False,
help="""The accepted image formats."""
)
# This is atomatically filled from accepted_image_formats
accepted_image_mimetypes = List(
config=False,
default_value=[IMAGE_FILEEXTENSION_TO_MIMETYPE[ifmt] for ifmt in ['png', 'jpg', 'svg']]
)
def _accepted_image_formats_changed(self, name, old, new):
if new != old:
converted = [IMAGE_FILEEXTENSION_TO_MIMETYPE[ifmt] for ifmt in new]
self.accepted_image_mimetypes = converted
def update(self, **config):
"""Update this
:param config: dict of properties to be updated
"""
for name, config_value in iteritems(config):
if hasattr(self, name):
setattr(self, name, config_value)
else:
self.log.error("Unknown config for document '%s': '%s:%s'. Ignored...",
self.name, name, config_value)
def copy(self):
"""Copy Constructor
:return: copy of self
"""
config = {}
for name in self.trait_names():
config[name] = getattr(self,name)
new_fod = type(self)(**config)
return new_fod
class TemporaryOutputDocument(LoggingConfigurable):
output_debug = Bool(False, config=True,
help="""Whether to print outputs to the (debug) log""")
# TODO: put loglevel to debug of this is True...
code_startmarker = Unicode("```{}", config=True,
help="Start of a code block, with language placeholder and "
"without linefeed")
code_endmarker = Unicode("```", config=True, help="end of a code block, without linefeed")
output_startmarker = Unicode("```", config=True,
help="Start of a output block, without linefeed")
    output_endmarker = Unicode("```", config=True, help="End of an output block, without linefeed")
error_line = Unicode("**ERROR**: {}", config=True,
help="error message line, with msg placeholder and without linefeed")
export_config = Instance(klass=FinalOutputConfiguration, help="Final output document configuration")
plot_mimetypes = List(default_value=list(IMAGE_MIMETYPE_TO_FILEEXTENSION.keys()),
allow_none=False, config=True,
help="Mimetypes, which should be handled as plots.")
markup_mimetypes = List(default_value=list(MARKUP_FORMAT_CONVERTER.keys()),
allow_none=False, config=True,
help="Mimetypes, which should be handled as markeduped text")
context = Instance(klass="knitpy.knitpy.ExecutionContext", config=False, allow_none=True)
def __init__(self, fileoutputs, export_config, **kwargs):
super(TemporaryOutputDocument,self).__init__(**kwargs)
self._fileoutputs = fileoutputs
self.export_config = export_config
self._output = []
# Init the caching system (class variables cache the first output of a former conversion
# in future runs)
self._last_content = None
self._cache_text = []
self._cache_code = []
self._cache_code_language = None
self._cache_output = []
@property
def outputdir(self):
if not os.path.isdir(self._fileoutputs):
os.mkdir(self._fileoutputs)
self.log.info("Support files will be in %s", os.path.join(self._fileoutputs, ''))
return self._fileoutputs
@property
def plotdir(self):
plotdir_name = "figure-%s" % self.export_config.file_extension
plotdir = os.path.join(self.outputdir, plotdir_name)
if not os.path.isdir(plotdir):
os.mkdir(plotdir)
return plotdir
@property
def content(self):
self.flush()
return "".join(self._output)
# The caching system is needed to make fusing together same "type" of content possible
# -> code inputs without output should go to the same block
def _ensure_newline(self):
# don't add a newline before any output
if not self._output:
return
last_content = self._output[-1]
while last_content == "":
del self._output[-1]
last_content = self._output[-1]
if last_content[-1] != "\n":
self._output.append("\n")
def flush(self):
if self.output_debug:
self.log.debug("Flushing caches in output.")
if self._cache_text:
self._output.extend(self._cache_text)
self._cache_text = []
if self._cache_code:
self._ensure_newline()
self._output.append(self.code_startmarker.format(self._cache_code_language))
self._output.append("\n")
self._output.extend(self._cache_code)
self._ensure_newline()
self._output.append(self.code_endmarker)
self._output.append("\n")
self._cache_code = []
self._cache_code_language = None
if self._cache_output:
self._ensure_newline()
self._output.append(self.output_startmarker)
self._output.append("\n")
comment = self.context.comment
if comment:
comment = str(comment) + " "
outputs = "".join(self._cache_output)
outputs = outputs[:-1] if outputs[-1] == "\n" else outputs
outputs = outputs.split("\n")
outputs = [comment + line + "\n" for line in outputs]
self._output.extend(outputs)
else:
self._output.extend(self._cache_output)
self._ensure_newline()
self._output.append(self.output_endmarker)
self._output.append("\n")
self._cache_output = []
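    # Illustrative example: after add_code("1 + 1") and add_output("2"), the
    # rendered content holds a fenced ```python block with the code followed
    # by a fenced ``` block with the output, separated by a blank line --
    # consecutive snippets of the same type are fused into one block rather
    # than producing a block per call.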
def _add_to_cache(self, content, content_type):
if is_string(content):
content = [content]
elif is_iterable(content):
pass
else:
content = [u"%s" % content]
# remove empty lines, which causes errors in _ensure_newline
content = [line for line in content if line != ""]
if self.output_debug:
if content_type == CODE:
_type = "%s (%s)" % (content_type, self._cache_code_language)
else:
_type = content_type
self.log.debug("Adding '%s': %s", _type, content)
if self._last_content and (content_type != self._last_content):
self.flush()
if self._output:
# make sure there is a empty line before the next differently formatted part,
# so that pandoc doesn't get confused...
# only add such a line if we are between our own generated content, i.e. between
# code and output or output and new code
_nl_between = [CODE, OUTPUT, ASIS]
if (self._last_content in _nl_between) and (content_type in _nl_between):
self._output.append("\n")
if content_type == CODE:
cache = self._cache_code
self._last_content = CODE
elif content_type == OUTPUT:
cache = self._cache_output
self._last_content = OUTPUT
elif content_type == ASIS:
# Just use text
cache = self._cache_text
self._last_content = ASIS
else:
cache = self._cache_text
self._last_content = TEXT
cache.extend(content)
def add_code(self, code, language="python"):
if self._cache_code_language and (language != self._cache_code_language):
self.flush()
self._cache_code_language = language
self._add_to_cache(code, CODE)
def add_output(self, output):
self._add_to_cache(output, OUTPUT)
def add_text(self, text):
self._add_to_cache(text, TEXT)
def add_asis(self, content):
self._add_to_cache(content, ASIS)
def add_image(self, mimetype, mimedata, title=""):
try:
mimedata = decodebytes(mimedata.encode())
# save as a file
if not self.context is None:
filename = u"%s-%s.%s" % (self.context.chunk_label,
self.context.chunk_plot_number,
IMAGE_MIMETYPE_TO_FILEEXTENSION[mimetype])
f = open(os.path.join(self.plotdir, filename), mode='w+b')
else:
self.log.info("Context no specified: using random filename for image")
f = tempfile.NamedTemporaryFile(suffix="."+IMAGE_MIMETYPE_TO_FILEEXTENSION[mimetype],
prefix='plot', dir=self.plotdir, mode='w+b',
delete=False)
f.write(mimedata)
f.close()
relative_name= "%s/%s/%s" % (self.outputdir, os.path.basename(self.plotdir),
os.path.basename(f.name))
self.log.info("Written file of type %s to %s", mimetype, relative_name)
template = ""
self.add_asis("\n")
self.add_asis(template % (title, relative_name))
self.add_asis("\n")
except Exception as e:
self.log.exception("Could not save a image")
raise KnitpyOutputException(str(e))
def add_markup_text(self, mimetype, mimedata):
# workaround for some pandoc weirdness:
# pandoc interprets html with indention as code and formats it with pre
# So remove all linefeeds/whitespace...
if mimetype == "text/html":
res= []
for line in mimedata.split("\n"):
res.append(line.strip())
mimedata = "".join(res)
# pandas adds multiple spaces if one element in a column is long, but the rest is
# short. Remove these spaces, as pandoc doesn't like them...
mimedata = re.sub(' +',' ', mimedata)
to_format = "markdown"
# try to convert to the current format so that it can be included "asis"
if not MARKUP_FORMAT_CONVERTER[mimetype] in [to_format,
self.export_config.pandoc_export_format]:
if "<table" in mimedata:
                # There is a bug in pandoc <=1.13.2, where th in normal tr triggers "only
# text" conversion.
msg = "Trying to fix tables for conversion with pandoc (bug in pandoc <=1.13.2)."
self.log.debug(msg)
mimedata = self._fix_html_tables_old_pandoc(mimedata)
try:
self.log.debug("Converting markup of type '%s' to '%s' via pandoc...",
mimetype, to_format)
mimedata = pandoc(mimedata, to=to_format, format=MARKUP_FORMAT_CONVERTER[mimetype])
except RuntimeError as e:
# these are pypandoc errors
msg = "Could not convert mime data of type '%s' to output format '%s'."
self.log.debug(msg, mimetype, to_format)
raise KnitpyOutputException(str(e))
except Exception as e:
msg = "Could not convert mime data of type '%s' to output format '%s'."
self.log.exception(msg, mimetype, to_format)
raise KnitpyOutputException(str(e))
self.add_asis("\n")
self.add_asis(mimedata)
self.add_asis("\n")
def _fix_html_tables_old_pandoc(self, htmlstring):
"""
Fix html tables, so that they are recognized by pandoc
        pandoc <=1.13.2 converts tables with '<th>' and '<td>' to plain text (each cell one
        paragraph). Remove all <th> in later rows (tbody) by replacing them with <td>. This is
close to the same solution as taken by pandoc in 1.13.3 and later.
See also: https://github.com/jgm/pandoc/issues/2015
"""
result = []
pos = 0
re_tables = re.compile(r"<table.*</table>", re.DOTALL)
re_tbody = re.compile(r"<tbody.*</tbody>", re.DOTALL)
tables = re_tables.finditer(htmlstring)
for table in tables:
# process the html before the match
result.append(htmlstring[pos:table.start()])
# now the table itself
table_html = htmlstring[table.start():table.end()]
tbody = re_tbody.search(table_html)
if not tbody is None:
result.append(table_html[0:tbody.start()])
tbody_html = table_html[tbody.start():tbody.end()]
tbody_html = tbody_html.replace("<th","<td")
tbody_html = tbody_html.replace("</th>", "</td>")
result.append(tbody_html)
result.append(table_html[tbody.end():])
else:
result.append(table_html)
pos = table.end()
result.append(htmlstring[pos:])
return "".join(result)
def add_execution_error(self, error, details=""):
# adding an error is considered "not normal", so we make sure it is clearly visible
self.flush()
# Set this to None, so no newline is added by accident. We will handle newlines after
# the error in this code
self._last_content = None
# make sure there is a empty line before and after the error message
self._ensure_newline()
self._output.append("\n")
self._output.append(self.error_line.format(error))
self._output.append("\n\n")
if details:
self._output.append(self.output_startmarker)
self._output.append("\n")
self._output.append(details)
self._ensure_newline()
self._output.append(self.output_endmarker)
self._output.append("\n\n")
| bsd-3-clause |
jjx02230808/project0223 | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
google/nitroml | nitroml/benchmark/results.py | 1 | 9930 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""NitroML benchmark pipeline result overview."""
import datetime
import json
import re
from typing import Dict, Any, List, NamedTuple, Optional
from nitroml.benchmark import result as br
import pandas as pd
from ml_metadata import metadata_store
from ml_metadata.proto import metadata_store_pb2
# Column name constants
RUN_ID_KEY = 'run_id'
STARTED_AT = 'started_at'
BENCHMARK_FULL_KEY = 'benchmark_fullname'
ARTIFACT_ID_KEY = 'artifact_id'
# Component constants
_STATS = 'ExampleStatistics'
# Name constants
_NAME = 'name'
_PRODUCER_COMPONENT = 'producer_component'
_STATE = 'state'
_PIPELINE_NAME = 'pipeline_name'
_PIPELINE_ROOT = 'pipeline_root'
_RUN_ID = 'run_id'
_COMPONENT_ID = 'component_id'
# IR-Based TFXDagRunner constants
_IS_IR_KEY = 'is_ir'
# Default columns
_DEFAULT_COLUMNS = (STARTED_AT, RUN_ID_KEY,
br.BenchmarkResult.BENCHMARK_NAME_KEY,
br.BenchmarkResult.BENCHMARK_RUN_KEY,
br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
_DATAFRAME_CONTEXTUAL_COLUMNS = (STARTED_AT, RUN_ID_KEY, BENCHMARK_FULL_KEY,
br.BenchmarkResult.BENCHMARK_NAME_KEY,
br.BenchmarkResult.BENCHMARK_RUN_KEY,
br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
_DEFAULT_CUSTOM_PROPERTIES = {
_NAME, _PRODUCER_COMPONENT, _STATE, _PIPELINE_NAME
}
class _Result(NamedTuple):
"""Wrapper for properties and property names."""
properties: Dict[str, Dict[str, Any]]
property_names: List[str]
class _RunInfo(NamedTuple):
"""Wrapper for run id and component name."""
run_id: str = ''
component_name: str = ''
started_at: int = 0
def _merge_results(results: List[_Result]) -> _Result:
"""Merges _Result objects into one."""
properties = {}
property_names = []
for result in results:
for key, props in result.properties.items():
if key in properties:
properties[key].update(props)
else:
properties[key] = {**props}
property_names += result.property_names
return _Result(properties=properties, property_names=property_names)
def _to_pytype(val: str) -> Any:
"""Coverts val to python type."""
try:
return json.loads(val.lower())
except ValueError:
return val
def _parse_value(value: metadata_store_pb2.Value) -> Any:
"""Parse value from `metadata_store_pb2.Value` proto."""
if value.HasField('int_value'):
return value.int_value
elif value.HasField('double_value'):
return value.double_value
else:
return _to_pytype(value.string_value)
def _get_artifact_run_info_map(store: metadata_store.MetadataStore,
artifact_ids: List[int]) -> Dict[int, _RunInfo]:
"""Returns a dictionary mapping artifact_id to its MyOrchestrator run_id.
Args:
store: MetaDataStore object to connect to MLMD instance.
artifact_ids: A list of artifact ids to load.
Returns:
A dictionary containing artifact_id as a key and MyOrchestrator run_id as value.
"""
# Get events of artifacts.
events = store.get_events_by_artifact_ids(artifact_ids)
exec_to_artifact = {}
for event in events:
exec_to_artifact[event.execution_id] = event.artifact_id
# Get execution of artifacts.
executions = store.get_executions_by_id(list(exec_to_artifact.keys()))
artifact_to_run_info = {}
for execution in executions:
run_id = execution.properties[RUN_ID_KEY].string_value
component = execution.properties[_COMPONENT_ID].string_value
artifact_id = exec_to_artifact[execution.id]
artifact_to_run_info[artifact_id] = _RunInfo(
run_id=run_id,
component_name=component,
started_at=execution.create_time_since_epoch)
return artifact_to_run_info
def _get_benchmark_results(store: metadata_store.MetadataStore) -> _Result:
"""Returns the benchmark results of the BenchmarkResultPublisher component.
Args:
store: MetaDataStore object to connect to MLMD instance.
Returns:
    A _Result object with properties containing benchmark results.
"""
metrics = {}
property_names = set()
publisher_artifacts = store.get_artifacts_by_type(
br.BenchmarkResult.TYPE_NAME)
for artifact in publisher_artifacts:
evals = {}
for key, val in artifact.custom_properties.items():
evals[key] = _parse_value(val)
# Change for the IR world.
if key == 'name':
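        # The artifact name appears to encode the run id as the second ':'-separated
        # field (roughly '<prefix>:<run_id>:<benchmark_name>'); this format is inferred
        # from the split below rather than documented behaviour.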
new_id = _parse_value(val).split(':')
if len(new_id) > 2:
evals[RUN_ID_KEY] = new_id[1]
property_names = property_names.union(evals.keys())
metrics[artifact.id] = evals
artifact_to_run_info = _get_artifact_run_info_map(store, list(metrics.keys()))
properties = {}
for artifact_id, evals in metrics.items():
run_info = artifact_to_run_info[artifact_id]
started_at = run_info.started_at // 1000
evals[STARTED_AT] = datetime.datetime.fromtimestamp(started_at)
if RUN_ID_KEY not in metrics[artifact_id]:
# Non-IR based runner.
continue
run_id = metrics[artifact_id][RUN_ID_KEY]
result_key = run_id + '.' + evals[br.BenchmarkResult.BENCHMARK_NAME_KEY]
if result_key in properties:
properties[result_key].update(evals)
else:
properties[result_key] = {**evals}
property_names = property_names.difference(
{_NAME, _PRODUCER_COMPONENT, _STATE, *_DEFAULT_COLUMNS, _IS_IR_KEY})
return _Result(properties=properties, property_names=sorted(property_names))
def get_statisticsgen_dir_list(
store: metadata_store.MetadataStore) -> List[str]:
"""Obtains a list of statisticsgen_dir from the store."""
stats_artifacts = store.get_artifacts_by_type(_STATS)
stat_dirs_list = [artifact.uri for artifact in stats_artifacts]
return stat_dirs_list
def _make_dataframe(metrics_list: List[Dict[str, Any]],
columns: List[str]) -> pd.DataFrame:
"""Makes pandas.DataFrame from metrics_list."""
df = pd.DataFrame(metrics_list)
if not df.empty:
# Reorder columns.
# Strip benchmark run repetition for aggregation.
df[BENCHMARK_FULL_KEY] = df[br.BenchmarkResult.BENCHMARK_NAME_KEY]
df[br.BenchmarkResult.BENCHMARK_NAME_KEY] = df[
br.BenchmarkResult.BENCHMARK_NAME_KEY].apply(
lambda x: re.sub(r'\.run_\d_of_\d$', '', x))
key_columns = list(_DATAFRAME_CONTEXTUAL_COLUMNS)
if br.BenchmarkResult.BENCHMARK_RUN_KEY not in df:
key_columns.remove(br.BenchmarkResult.BENCHMARK_RUN_KEY)
if br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY not in df:
key_columns.remove(br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
df = df[key_columns + columns]
df = df.set_index([STARTED_AT])
return df
def _aggregate_results(df: pd.DataFrame,
metric_aggregators: Optional[List[Any]],
groupby_columns: List[str]):
"""Aggregates metrics in an overview pd.DataFrame."""
df = df.copy()
groupby_columns = groupby_columns.copy()
if br.BenchmarkResult.BENCHMARK_RUN_KEY in df:
df = df.drop([br.BenchmarkResult.BENCHMARK_RUN_KEY], axis=1)
groupby_columns.remove(br.BenchmarkResult.BENCHMARK_RUN_KEY)
groupby_columns.remove(BENCHMARK_FULL_KEY)
if br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY not in df:
groupby_columns.remove(br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
# Group by contextual columns and aggregate metrics.
df = df.groupby(groupby_columns)
df = df.agg(metric_aggregators)
# Flatten MultiIndex into a DataFrame.
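  # e.g. an aggregated column ('accuracy', 'mean') becomes the single name 'accuracy mean'.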
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df.reset_index().set_index('started_at')
def overview(
store: metadata_store.MetadataStore,
metric_aggregators: Optional[List[Any]] = None,
) -> pd.DataFrame:
"""Returns a pandas.DataFrame containing hparams and evaluation results.
This method assumes that `tf.enable_v2_behavior()` was called beforehand.
  It loads results for all evaluations and can therefore be slow.
TODO(b/151085210): Allow filtering incomplete benchmark runs.
Assumptions:
    For the given pipeline, MyOrchestrator run_id and component_id of trainer are unique
and (my_orchestrator_run_id + trainer.component_id-postfix) is equal to
(my_orchestrator_run_id + artifact.producer_component-postfix).
Args:
store: MetaDataStore object for connecting to an MLMD instance.
metric_aggregators: Iterable of functions and/or function names, e.g.
[np.sum, 'mean']. Groups individual runs by their contextual features (run
id, hparams), and aggregates metrics by the given functions. If a
function, must either work when passed a DataFrame or when passed to
DataFrame.apply.
Returns:
A pandas DataFrame with the loaded hparams and evaluations or an empty one
if no evaluations and hparams could be found.
"""
result = _get_benchmark_results(store)
# Filter metrics that have empty hparams and evaluation results.
results_list = [
result for result in result.properties.values()
if len(result) > len(_DEFAULT_COLUMNS)
]
df = _make_dataframe(results_list, result.property_names)
if metric_aggregators:
return _aggregate_results(
df,
metric_aggregators=metric_aggregators,
groupby_columns=list(_DATAFRAME_CONTEXTUAL_COLUMNS))
return df
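# Illustrative usage sketch: a minimal, hypothetical way to call `overview` with
# metric aggregators. The SQLite path below is a placeholder, not a real endpoint.
def _example_overview_usage():
  """Sketch only: connect to a local MLMD store and aggregate benchmark metrics."""
  import numpy as np
  from ml_metadata.proto import metadata_store_pb2
  config = metadata_store_pb2.ConnectionConfig()
  config.sqlite.filename_uri = '/tmp/mlmd.sqlite'  # assumed local MLMD database
  store = metadata_store.MetadataStore(config)
  # Mean and standard deviation of every metric across benchmark repetitions.
  return overview(store, metric_aggregators=[np.mean, 'std'])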
| apache-2.0 |
kaiserroll14/301finalproject | main/pandas/computation/engines.py | 15 | 3732 | """Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas import compat
from pandas.compat import DeepChainMap, map
from pandas.core import common as com
from pandas.computation.align import _align, _reconstruct_object
from pandas.computation.ops import UndefinedVariableError, _mathops, _reductions
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
    expr : Expr
        Expression whose variable names may clash with numexpr builtins.
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "%s" overlap with '
'numexpr builtins: (%s)' % (expr, s))
class AbstractEngine(object):
"""Object serving as a base class for all engines."""
__metaclass__ = abc.ABCMeta
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return com.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super(NumExprEngine, self).__init__(expr)
def convert(self):
return str(super(NumExprEngine, self).convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = compat.text_type(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super(PythonEngine, self).__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
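# Illustrative usage sketch: these engines are normally reached through the public
# ``pandas.eval`` / ``DataFrame.query`` entry points rather than instantiated
# directly. A minimal, hypothetical example exercising both engines:
def _example_engine_usage():
    """Sketch only: evaluate the same expression with the numexpr and python engines."""
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    fast = pd.eval('df.a + df.b', engine='numexpr')  # dispatches to NumExprEngine
    slow = pd.eval('df.a + df.b', engine='python')   # dispatches to PythonEngine
    return fast.equals(slow)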
| gpl-3.0 |
Kruehlio/MUSEspec | spectrum3d.py | 1 | 19440 | # -*- coding: utf-8 -*-
""" Spectrum class for 3d-spectra. Particularly MUSE."""
import warnings
import multiprocessing
import logging
import sys
import signal
import pyfits
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from MUSEspec.utils.astro import (
LDMP, Avlaws, airtovac, ergJy, abflux, getebv)
from MUSEspec.analysis.functions import (deg2sexa, sexa2deg, ccmred)
from MUSEspec.analysis.maps import (
getDens, getSFR, getOH, getIon, getEW, getBPT, getEBV, getVel,
getSeg, getRGB, getTemp, getOHT, getQ)
from MUSEspec.analysis.extract import (
extract1d, extract2d, extract3d, subtractCont, getGalcen,
cutCube, extractCont, RESTWL)
from MUSEspec.analysis.analysis import (metGrad, voronoi_bin, anaSpec)
from MUSEspec.utils.starlight import runStar, subStars, subAllStars
from MUSEspec.MUSEio.museio import (
pdfout, fitsout, asciiout, cubeout, plotspec, distout, asciiin)
warnings.filterwarnings("ignore")
LOG_FMT = '%(levelname)s [%(asctime)s]: %(message)s'
DATE_FMT = '%Y-%m-%d %H:%M:%S'
FMT = logging.Formatter(fmt=LOG_FMT, datefmt=DATE_FMT)
LOGGER = logging.getLogger('__main__')
logging.root.setLevel(logging.DEBUG)
CH = logging.StreamHandler() # console handler
CH.setFormatter(FMT)
LOGGER.handlers = []
LOGGER.addHandler(CH)
def signal_handler(signal, frame):
""" Killing from the command line """
sys.exit("CTRL+C detected, stopping execution")
signal.signal(signal.SIGINT, signal_handler)
class Spectrum3d(object):
""" Fits cube class for data exploration, analysis, modelling.
Arguments:
inst: Instrument that produced the spectrum (optional, default=MUSE)
filen: Filename of fits data cube
target: Target of observation (for output file)
Methods:
setFiles: Sets fits files
        setRedshift: Given a redshift z, sets cosmological parameters
        ebvGal: Uses fits header keywords RA,DEC to get Galactic Foreground EB-V
ebvCor: Corrects a given line for the EB-V map
        checkPhot: Checks the flux calibration through synthetic photometry
subtractCont: Subtracts continuum of plane
getCont: Measures continuum of plane
getSFR: Calculates the SFR density map based on Halpha
getOH: Calculates oxygen abundance map based on strong line diagnostics
getIon: Calculates [OIII]/Hbeta map as ionization/excitation proxy
getEW: Calculates equivalent width maps of given line
getEBV: Calculates EB-V maps from Balmer decrement
BPT: Spaxels in the Baldwich-Philips-Terlevich diagram
extractCube: Extracts cube cut in wavelength
extractPlane: Extracts a plane, summed in wavelength
extrSpec: Extracts a spectrum at given position
astro: Corrects fits file astrometry
wltopix: Wavelength to pixel conversion
pixtowl: Pixel to wavelength conversion
skytopix: Sky coordinates conversion (degree) to pixel conversion
pixtosky: Pixel to sky coordinates conversion (degree)
sexatopix: Sky coordinates conversion (sexagesimal) to pixel conversion
pixtosexa: Pixel to sky coordinates conversion (sexagesimal)
velMap: Calculates velocity map - clumsy, takes ages
hiidetect: HII region detection algorithm (work in progress)
        rgb: Provided three planes, creates an RGB image
scaleCube: Scale cube by polynomial of given degree (default 1)
getGalcen: Get x and y index of center of galaxy (star light)
"""
def __init__(self, filen=None, inst='MUSE', target='', verbose=0):
self.inst = inst
self.z = None
self.datfile = ''
self.target = target
self.mask = None
self.skymask = None
self.maskpix = None
self.ebvmap = None
self.ebvGalCorr = 0
self.objmask = None
self.verbose = verbose
self.scale = []
self.LDMP = 0
self.AngD = 0
if filen is not None:
self.setFiles(filen)
        self.ncores = multiprocessing.cpu_count() // 2
LOGGER.info('Using %i cores for analysis', self.ncores)
def setFiles(self, filen, fluxmult=1, dAxis=3, mult=1, data=True, ext=1):
""" Uses pyfits to set the header, data and error as instance attributes.
The fits file should have at least two extension, where the first
contains the data, the second the variance. Returns nothing.
Parameters
----------
filen : str
required, this is the filname of the fitsfile to be read in
"""
# Get primary header
self.headprim = pyfits.getheader(filen, 0)
# Get header of data extension
self.head = pyfits.getheader(filen, ext)
if data is True:
# Read in data
self.data = pyfits.getdata(filen, ext) * fluxmult
self.starcube = np.zeros(self.data.shape, dtype='>f4')
try:
self.headerro = pyfits.getheader(filen, 2)
# Read in variance and turn into stdev
if data is True:
self.erro = pyfits.getdata(filen, 2)**0.5 * fluxmult
except IndexError:
pass
wlkey, wlstart = 'NAXIS%i' % dAxis, 'CRVAL%i' % dAxis
wlinc, wlpixst = 'CD%i_%i' % (dAxis, dAxis), 'CRPIX%i' % dAxis
pix = np.arange(self.head[wlkey]) + self.head[wlpixst]
self.pix = pix
# Create wave array from fits info
self.wave = airtovac(self.head[wlstart] + (pix - 1) * self.head[wlinc])
self.wave *= mult
self.pixsky = \
(self.head['CD1_1']**2 + self.head['CD1_2']**2) ** 0.5 * 3600
self.lenx = self.head['NAXIS1']
self.leny = self.head['NAXIS2']
self.wlinc = self.head[wlinc] * mult
if self.target == '':
self.target = self.headprim['OBJECT']
self.fluxunit = self.head['BUNIT']
self.output = self.headprim['OBJECT']
self.base, self.ext = filen.split('.fits')[0], '.fits'
LOGGER.info('Fits cube loaded %s', filen)
LOGGER.info('Wavelength range %.1f - %.1f (vacuum)',
self.wave[0], self.wave[-1])
def setRedshift(self, z):
""" Setting luminosity distance and angular seperation here, provided
a given redshift z. Returns nothing.
Parameters
----------
z : float
required, this is the redshift of the source
"""
LD, angsep = LDMP(z, v=2)
LOGGER.info('Luminosity distance at z=%.4f: %.2f MPc', z, LD)
LOGGER.info('Luminosity distance at z=%.4f: %.2e cm',
z, LD * 3.0857E24)
self.z = z
self.LDMP = LD * 3.0857E24
self.AngD = angsep
def ebvGal(self, ebv='', rv=3.08):
""" If user does not provide ebv, it uses the header information of the
pointing to obtain the Galactic
foreground reddening from IRSA. These are by default the Schlafly and
Finkbeiner values. Immediatly dereddens the data and error using rv
(default 3.08) in place. Returns nothing.
Parameters
----------
ebv : float
default '', and queries the web given header RA and DEC
rv : float
default 3.08, total to selective reddening RV
"""
if ebv == '':
ra, dec = self.headprim['RA'], self.headprim['DEC']
ebv, std, ref, av = getebv(ra, dec, rv)
ebvcorr = ccmred(self.wave, ebv, rv)
LOGGER.info('Dereddening data using MW E_B-V = %.3f mag', ebv)
self.data *= ebvcorr[:, np.newaxis, np.newaxis]
try:
self.erro *= ebvcorr[:, np.newaxis, np.newaxis]
except AttributeError:
pass
self.ebvGalCorr = ebv
def ebvCor(self, line, rv=3.08, redlaw='mw', ebv=None):
""" Uses a the instance attribut ebvmap, the previously calculated map
        of host reddening to calculate a correction map for a given line.
Parameters
----------
line : str
default '', for example ha for Halpha
rv : float
default 3.08, total to selective reddening RV
redlaw : str
default mw, assumed reddening law
Returns
-------
ebvcorr : np.array
The correction map to be applied to the linefluxes to correct
for the galaxy's dust absorption
"""
WL = RESTWL[line.lower()]/10.
if ebv is not None:
ebvcalc = ebv
ebvcorr = 1./np.exp(-1./1.086*ebvcalc * rv * Avlaws(WL, redlaw))
        elif self.ebvmap is not None:
ebvcalc = self.ebvmap
ebvcorr = 1./np.exp(-1./1.086*ebvcalc * rv * Avlaws(WL, redlaw))
ebvcorr[np.isnan(ebvcorr)] = 1
ebvcorr[ebvcorr < 1] = 1
else:
LOGGER.error('Need an ebv or EBV-map / create via getEBV !!!')
raise SystemExit
return ebvcorr
def checkPhot(self, mag, band='r', ra=None, dec=None, radius=7,
magerr=1E-3):
""" Uses synthetic photometry at a given position in a given band at a
given magnitude to check the flux calibration of the spectrum.
Returns nothing.
Parameters
----------
mag : float
default '', required magnitude of comparison
band : str
default r, photometric filter. VRI are assumed to be in Vega, griz
in the AB system
ra : float
default None, Right Ascension of comparison star
dec : float
default None, Declination of comparison star. If ra and/or dec are
None, uses the spectrum of the full cube
radius : int
            Radius in pixels around ra/dec for spectrum extraction
"""
ABcorD = {'V': 0.00, 'r': 0.178, 'R': 0.21, 'i': 0.410, 'I': 0.45,
'z': 0.543}
wls = {'r': [5500., 6832.], 'i': [7000., 7960.],
'V': [4920.9, 5980.2], 'R': [5698.9, 7344.4],
'I': [7210., 8750.], 'F814': [6884.0, 9659.4],
'z': [8250.0, 9530.4]}
if band in 'VRI':
mag = mag + ABcorD[band]
if ra is not None and dec is not None:
if self.verbose > 0:
LOGGER.info('Star at: %s, %s', ra, dec)
wl, spec, err = self.extrSpec(ra=ra, dec=dec, radius=radius)
else:
wl, spec, err = self.extrSpec(total=True)
bandsel = (wl > wls[band][0]) * (wl < wls[band][1])
avgflux = np.nanmedian(spec[bandsel])*1E-20
avgwl = np.nanmedian(wl[bandsel])
fluxspec = ergJy(avgflux, avgwl)
fluxref = abflux(mag)
LOGGER.info('Scale from spectrum to photometry for %s-band: %.3f',
band, fluxref/fluxspec)
self.scale.append([avgwl, fluxref/fluxspec, magerr])
def scaleCube(self, deg=1):
""" Fits a polynomial of degree deg to the previously calculated scale-
factors at a given wavelength, and modifies the data with the derived
correction curve. Returns nothing, but modifies data and error instance
attribute in place.
Parameters
----------
deg : int
default 1, required degree of fit
"""
if self.scale != []:
sfac = np.array(self.scale)[:, 1]
wls = np.array(self.scale)[:, 0]
err = np.array(self.scale)[:, 2]
b = np.polyfit(x=wls, y=sfac, w=1./err, deg=deg)
            LOGGER.info('Scaling spectrum by polynomial of degree ' +
'%i to %i photometric points', deg, len(sfac))
LOGGER.info('Linear term %.e', b[0])
p = np.poly1d(b)
corrf = p(self.wave)
self.data *= corrf[:, np.newaxis, np.newaxis]
self.erro *= corrf[:, np.newaxis, np.newaxis]
fig1 = plt.figure(figsize=(6, 4.2))
fig1.subplots_adjust(bottom=0.15, top=0.97, left=0.13, right=0.96)
ax1 = fig1.add_subplot(1, 1, 1)
ax1.errorbar(wls, sfac, yerr=err, capsize=0,
ms=8, fmt='o', color='firebrick')
ax1.plot(self.wave, corrf, '-', color='black')
ax1.plot(self.wave, np.ones(len(corrf)), '--', color='black')
ax1.set_xlabel(r'$\rm{Observed\,wavelength\,(\AA)}$', fontsize=18)
ax1.set_ylabel(r'$\rm{Correction\, factor}$', fontsize=18)
ax1.set_xlim(4650, 9300)
fig1.savefig('%s_%s_photcorr.pdf' % (self.inst, self.target))
plt.close(fig1)
else:
LOGGER.warning("No scaling performed")
LOGGER.warning("Calculate scaling first with checkPhot")
def astro(self, starras=None, stardecs=None, ras=None, decs=None,
starxs=None, starys=None):
"""Correct MUSE astrometry: Starra and stardec are lists of the original
coordinates of a source in the MUSE cube with actual coordinates ra,
dec.
Returns nothing, but changes the header keywords CRVAL1 and CRVAL2 in
        the instance attribute head. Can only correct a translation mismatch,
        not rotation or plate-scale changes (which should be small). Length of
starras, stardecs, ras, decs must of course be equal for the code to
make sense.
Parameters
----------
starras : list
List of right ascension positions in cube of reference stars
stardecs : list
List of declination positions in cube of reference stars
ras : list
List of right ascension true positions of reference stars
decs : list
List of declinations true positions of reference stars
"""
if starxs is not None and starys is not None:
for starx, stary in zip(starxs, starys):
ra, dec = self.pixtosky(starx, stary)
starras.append(ra)
stardecs.append(dec)
if len(starras) != len(stardecs) or len(ras) != len(decs) or \
len(starras) != len(ras):
LOGGER.error('Input lists must be of equal length')
dra, ddec = np.array([]), np.array([])
for starra, stardec, ra, dec in zip(starras, stardecs, ras, decs):
starra, stardec = sexa2deg(starra, stardec)
ra, dec = sexa2deg(ra, dec)
dra = np.append(dra, starra - ra)
ddec = np.append(ddec, stardec - dec)
dram, ddecm = np.average(dra), np.average(ddec)
LOGGER.info('Changing astrometry by %.1f" %.1f"',
dram*3600, ddecm*3600)
LOGGER.info('RMS astrometry %.3f" %.3f"',
np.std(dra)*3600, np.std(ddec)*3600)
self.head['CRVAL1'] -= dram
self.head['CRVAL2'] -= ddecm
def getCont(self, pix1, pix2, dx=15):
cont1 = np.nanmedian(self.data[pix1-dx:pix1], axis=0)
cont2 = np.nanmedian(self.data[pix2:pix2+dx], axis=0)
return np.nanmean(np.array([cont1, cont2]), axis=0)
def getDens(self, **kwargs):
return getDens(self, **kwargs)
def anaSpec(self, **kwargs):
return anaSpec(self, **kwargs)
def metGrad(self, **kwargs):
metGrad(self, **kwargs)
def getSFR(self, **kwargs):
return getSFR(self, **kwargs)
def getOH(self, **kwargs):
return getOH(self, **kwargs)
def getOHT(self, toiii, toii, siii, **kwargs):
return getOHT(self, toiii, toii, siii, **kwargs)
def getQ(self, **kwargs):
return getQ(self, **kwargs)
def getIon(self, meth='S', **kwargs):
return getIon(self, meth=meth, **kwargs)
def getGalcen(self, **kwargs):
return getGalcen(self, **kwargs)
def extractCont(self, line, **kwargs):
return extractCont(self, line, **kwargs)
def getTemp(self, meth='SIII', **kwargs):
return getTemp(self, meth=meth, **kwargs)
def getEW(self, line, **kwargs):
return getEW(self, line, **kwargs)
def getEBV(self, **kwargs):
return getEBV(self, **kwargs)
def BPT(self, **kwargs):
getBPT(self, **kwargs)
def velMap(self, **kwargs):
return getVel(self, **kwargs)
def hiidetect(self, plane, **kwargs):
return getSeg(self, plane, **kwargs)
def rgb(self, planes, **kwargs):
return getRGB(planes, **kwargs)
def runStar(self, ascii, **kwargs):
return runStar(self, ascii, **kwargs)
def subStars(self, x, y, **kwargs):
subStars(self, x, y, **kwargs)
def subAllStars(self, **kwargs):
subAllStars(self, **kwargs)
def pdfout(self, plane, **kwargs):
pdfout(self, plane, **kwargs)
def distout(self, plane, minx, maxx, dx, **kwargs):
distout(self, plane, minx, maxx, dx, **kwargs)
def fitsout(self, plane, **kwargs):
fitsout(self, plane, **kwargs)
def cubeout(self, cube, **kwargs):
cubeout(self, cube, **kwargs)
def asciiout(self, wl, spec, **kwargs):
return asciiout(self, wl, spec, **kwargs)
def asciiin(self, ascii, **kwargs):
return asciiin(self, ascii, **kwargs)
def plotspec(self, wl, spec, **kwargs):
return plotspec(self, wl, spec, **kwargs)
def subCube(self, **kwargs):
return extract3d(self, **kwargs)
def cutCube(self, **kwargs):
return cutCube(self, **kwargs)
def extractCube(self, **kwargs):
return extract3d(self, **kwargs)
def extractPlane(self, **kwargs):
return extract2d(self, **kwargs)
def extrSpec(self, **kwargs):
return extract1d(self, **kwargs)
def voronoi_bin(self, **kwargs):
return voronoi_bin(**kwargs)
def subtractCont(self, plane, pix1, pix2, cpix1, cpix2, dx=10):
return subtractCont(self, plane, pix1, pix2, cpix1, cpix2, dx=dx)
def wltopix(self, wl):
""" Converts wavelength as input into nearest integer pixel value """
pix = ((wl - self.wave[0]) / self.wlinc)
return max(0, int(round(pix)))
def pixtowl(self, pix):
""" Converts pixel into wavelength """
return self.wave[pix-1]
def pixtosky(self, x, y):
""" Converts x, y positions into ra, dec in degree """
dx = x - self.head['CRPIX1']
dy = y - self.head['CRPIX2']
decdeg = self.head['CRVAL2'] + self.head['CD2_2'] * dy
radeg = self.head['CRVAL1'] + (self.head['CD1_1'] * dx) /\
np.cos(decdeg * np.pi/180.)
return radeg, decdeg
def skytopix(self, ra, dec):
""" Converts ra, dec positions in degrees into x, y
x, y in python format, starts with 0
"""
y = (dec - self.head['CRVAL2']) / self.head['CD2_2'] + \
self.head['CRPIX2']
x = ((ra - self.head['CRVAL1']) / self.head['CD1_1']) * \
np.cos(dec * np.pi/180.) + self.head['CRPIX1']
return (int(round(x-1)), int(round(y-1)))
def pixtosexa(self, x, y):
""" Converts x, y positions into ra, dec in sexagesimal """
ra, dec = self.pixtosky(x, y)
x, y = deg2sexa(ra, dec)
return (x, y)
def sexatopix(self, ra, dec):
""" Converts ra, dec positions in sexagesimal into x, y """
ra, dec = sexa2deg(ra, dec)
x, y = self.skytopix(ra, dec)
return (int(round(x)), int(round(y)))
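# Illustrative usage sketch: a minimal, assumed workflow for this class; the file
# name, redshift, magnitude and coordinates are placeholders, not values from any
# real data set.
def _example_workflow():
    """Sketch only: load a cube, set cosmology, deredden, and scale to photometry."""
    cube = Spectrum3d(filen='DATACUBE_FINAL.fits', target='ExampleGalaxy')
    cube.setRedshift(0.0085)
    cube.ebvGal()  # Galactic foreground dereddening from the header RA/DEC
    cube.checkPhot(17.2, band='r', ra='12:34:56.7', dec='-01:23:45.6')
    cube.scaleCube(deg=1)  # apply the fitted photometric correction curve
    return cube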
| mit |
robin-lai/scikit-learn | examples/classification/plot_classifier_comparison.py | 66 | 4895 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
superbeckgit/dstauffman | stats.py | 1 | 20541 | # -*- coding: utf-8 -*-
r"""
Stats module file for the "dstauffman" library. It contains generic statistics related routines
that can be independently defined and used by other modules.
Notes
-----
#. Written by David C. Stauffer in December 2015.
"""
# pylint: disable=E1101, C0301, C0103
#%% Imports
from collections import OrderedDict
import doctest
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import unittest
from dstauffman.constants import MONTHS_PER_YEAR
from dstauffman.plotting import Opts, setup_plots
#%% Functions - convert_annual_to_monthly_probability
def convert_annual_to_monthly_probability(annual):
r"""
    Converts a given annual probability into the equivalent monthly one.
Parameters
----------
annual : numpy.ndarray
annual probabilities, 0 <= annual <= 1
Returns
-------
monthly : numpy.ndarray
equivalent monthly probabilities, 0 <= monthly <= 1
Raises
------
ValueError
Any probabilities outside of the [0, 1] range
Notes
-----
#. Checks for boundary cases to avoid a divide by zero warning
Examples
--------
>>> from dstauffman import convert_annual_to_monthly_probability
>>> import numpy as np
>>> annual = np.array([0, 0.1, 1])
>>> monthly = convert_annual_to_monthly_probability(annual)
>>> print(monthly) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.00874161 1. ]
"""
# check ranges
if np.any(annual < 0):
raise ValueError('annual must be >= 0')
if np.any(annual > 1):
raise ValueError('annual must be <= 1')
# convert to equivalent probability and return result
monthly = 1-np.exp(np.log(1-annual)/MONTHS_PER_YEAR)
return monthly
#%% Functions - convert_monthly_to_annual_probability
def convert_monthly_to_annual_probability(monthly):
r"""
Converts a given monthly probability into the equivalent annual one.
Parameters
----------
monthly : numpy.ndarray
equivalent monthly probabilities, 0 <= monthly <= 1
Returns
-------
annual : numpy.ndarray
annual probabilities, 0 <= annual <= 1
Examples
--------
>>> from dstauffman import convert_monthly_to_annual_probability
>>> import numpy as np
>>> monthly = np.array([0, 0.1, 1])
>>> annual = convert_monthly_to_annual_probability(monthly)
>>> print(annual) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.71757046 1. ]
"""
# check ranges
if np.any(monthly < 0):
raise ValueError('monthly must be >= 0')
if np.any(monthly > 1):
        raise ValueError('monthly must be <= 1')
# convert to equivalent probability and return result
annual = 1 - (1 - monthly)**MONTHS_PER_YEAR
return annual
#%% Functions - ca2mp & cm2ap aliases
ca2mp = convert_annual_to_monthly_probability
cm2ap = convert_monthly_to_annual_probability
#%% Functions - prob_to_rate
def prob_to_rate(prob, time=1):
r"""
Converts a given probability and time to a rate.
Parameters
----------
prob : numpy.ndarray
Probability of event happening over the given time
time : float
Time for the given probability in years
Returns
-------
rate : numpy.ndarray
Equivalent annual rate for the given probability and time
Notes
-----
#. Written by David C. Stauffer in January 2016.
Examples
--------
>>> from dstauffman import prob_to_rate
>>> import numpy as np
>>> prob = np.array([0, 0.1, 1])
>>> time = 3
>>> rate = prob_to_rate(prob, time)
>>> print(rate) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.03512017 inf]
"""
# check ranges
if np.any(prob < 0):
raise ValueError('Probability must be >= 0')
if np.any(prob > 1):
raise ValueError('Probability must be <= 1')
# calculate rate
rate = -np.log(1 - prob) / time
# prevent code from returning a bunch of negative zeros when prob is exactly 0
rate += 0.
return rate
#%% Functions - rate_to_prob
def rate_to_prob(rate, time=1):
r"""
Converts a given rate and time to a probability.
Parameters
----------
rate : numpy.ndarray
Annual rate for the given time
time : float
Time period for the desired probability to be calculated from, in years
Returns
-------
prob : numpy.ndarray
Equivalent probability of event happening over the given time
Notes
-----
#. Written by David C. Stauffer in January 2016.
Examples
--------
>>> from dstauffman import rate_to_prob
>>> import numpy as np
>>> rate = np.array([0, 0.1, 1, 100, np.inf])
>>> time = 1./12
>>> prob = rate_to_prob(rate, time)
>>> print(prob) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.00829871 0.07995559 0.99975963 1. ]
"""
# check ranges
if np.any(rate < 0):
raise ValueError('Rate must be >= 0')
# calculate probability
prob = 1 - np.exp(-rate * time)
return prob
#%% Functions - month_prob_mult_ratio
def month_prob_mult_ratio(prob, ratio):
r"""
Multiplies a monthly probability by a given risk or hazard ratio.
Parameters
----------
prob : numpy.ndarray
Probability of event happening over one month
ratio : float
Multiplication ratio to apply to probability
Returns
-------
mult_prob : numpy.ndarray
Equivalent multiplicative monthly probability
Notes
-----
    #. Written by David C. Stauffer in January 2016.
Examples
--------
>>> from dstauffman import month_prob_mult_ratio
>>> import numpy as np
>>> prob = np.array([0, 0.1, 1])
>>> ratio = 2
>>> mult_prob = month_prob_mult_ratio(prob, ratio)
>>> print(mult_prob) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.19 1. ]
>>> ratio = 0.5
>>> mult_prob = month_prob_mult_ratio(prob, ratio)
>>> print(mult_prob) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.0513167 1. ]
"""
# convert the probability to a rate
rate = prob_to_rate(prob, time=1./MONTHS_PER_YEAR)
# scale the rate
mult_rate = rate * ratio
# convert back to a probability
mult_prob = rate_to_prob(mult_rate, time=1./MONTHS_PER_YEAR)
return mult_prob
#%% Functions - annual_rate_to_monthly_probability
def annual_rate_to_monthly_probability(rate):
r"""
Converts a given annual rate to a monthly probability.
Parameters
----------
rate : numpy.ndarray
Annual rate
Returns
-------
prob : numpy.ndarray
Equivalent monthly probability
Notes
-----
#. Written by David C. Stauffer in January 2016.
See Also
--------
rate_to_prob
Examples
--------
>>> from dstauffman import annual_rate_to_monthly_probability
>>> import numpy as np
>>> rate = np.array([0, 0.5, 1, 5, np.inf])
>>> prob = annual_rate_to_monthly_probability(rate)
>>> print(prob) # doctest: +NORMALIZE_WHITESPACE
[ 0. 0.04081054 0.07995559 0.34075937 1. ]
"""
# divide rate and calculate probability
prob = rate_to_prob(rate/MONTHS_PER_YEAR)
return prob
#%% Functions - monthly_probability_to_annual_rate
def monthly_probability_to_annual_rate(prob):
r"""
Converts a given monthly probability to an annual rate.
Parameters
----------
prob : numpy.ndarray
Monthly probability
Returns
-------
rate : numpy.ndarray
Equivalent annual rate
Notes
-----
#. Written by David C. Stauffer in April 2016.
See Also
--------
prob_to_rate
Examples
--------
>>> from dstauffman import monthly_probability_to_annual_rate
>>> import numpy as np
>>> prob = np.array([0, 0.04081054, 0.07995559, 0.34075937, 1])
>>> rate = monthly_probability_to_annual_rate(prob)
>>> print(' '.join(('{:.2f}'.format(x) for x in rate))) # doctest: +NORMALIZE_WHITESPACE
0.00 0.50 1.00 5.00 inf
"""
# divide rate and calculate probability
rate = prob_to_rate(prob, time=1/MONTHS_PER_YEAR)
return rate
#%% Functions - ar2mp
ar2mp = annual_rate_to_monthly_probability
mp2ar = monthly_probability_to_annual_rate
#%% Functions - combine_sets
def combine_sets(n1, u1, s1, n2, u2, s2):
r"""
Combines the mean and standard deviations for two non-overlapping sets of data.
This function combines two non-overlapping data sets, given a number of samples, mean
and standard deviation for the two data sets. It first calculates the total number of samples
then calculates the total mean using a weighted average, and then calculates the combined
standard deviation using an equation found on wikipedia. It also checks for special cases
where either data set is empty or if only one total point is in the combined set.
Parameters
----------
n1 : float
number of points in data set 1
u1 : float
mean of data set 1
s1 : float
standard deviation of data set 1
n2 : float
number of points in data set 2
u2 : float
mean of data set 2
s2 : float
standard deviation of data set 2
Returns
-------
n : float,
number of points in the combined data set
u : float,
mean of the combined data set
s : float,
standard deviation of the combined data set
See Also
--------
np.mean, np.std
References
----------
#. http://en.wikipedia.org/wiki/Standard_deviation#Sample-based_statistics, on 8/7/12
Notes
-----
#. Written in Matlab by David C. Stauffer in Jul 2012.
#. Ported to Python by David C. Stauffer in May 2015.
#. Could be expanded to broadcast and handle array inputs.
Examples
--------
>>> from dstauffman import combine_sets
>>> n1 = 5
>>> u1 = 1
>>> s1 = 0.5
>>> n2 = 10
>>> u2 = 2
>>> s2 = 0.25
>>> (n, u, s) = combine_sets(n1, u1, s1, n2, u2, s2)
>>> print(n)
15
>>> print(u) # doctest: +ELLIPSIS
1.666666...67
>>> print(s)
0.59135639081
"""
# assertions
assert n1 >= 0
assert n2 >= 0
assert s1 >= 0
assert s2 >= 0
# combine total number of samples
n = n1 + n2
# check for zero case
if n == 0:
u = 0
s = 0
return (n, u, s)
# calculate the combined mean
u = 1/n * (n1*u1 + n2*u2)
# calculate the combined standard deviation
if n != 1:
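        # Pooled sample variance for two disjoint sets (see the Wikipedia reference above):
        # s^2 = [(n1-1)*s1^2 + n1*u1^2 + (n2-1)*s2^2 + n2*u2^2 - n*u^2] / (n - 1)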
s = np.sqrt(1/(n-1) * ( (n1-1)*s1**2 + n1*u1**2 + (n2-1)*s2**2 + n2*u2**2 - n*u**2))
else:
# special case where one of the data sets is empty
if n1 == 1:
s = s1
elif n2 == 1:
s = s2
else:
# shouldn't be able to ever reach this line with assertions on
raise ValueError('Total samples are 1, but neither data set has only one item.') # pragma: no cover
return (n, u, s)
#%% Functions - icer
def icer(cost, qaly, names=None, baseline=None, make_plot=False, opts=None):
r"""
Calculates the incremental cost effectiveness ratios with steps to throw out dominated strategies.
Summary
-------
In a loop, the code sorts by cost, throws out strongly dominated strategies (qaly doesn't
improve despite higher costs), calculates an incremental cost, qaly and cost effectiveness
ratio, then throws out weakly dominated strategies (icer doesn't improve over cheaper options)
and finally returns the incremental cost, qaly and ratios for the remaining "frontier" options
along with an order variable to map them back to the inputs.
Parameters
----------
cost : (N) array_like
Cost of each strategy
qaly : (N) array_like
Quality adjusted life years (QALY) gained by each strategy
names : (N) array_like, optional
Names of the different strategies
baseline : int, optional
Index of baseline strategy to use for cost comparisons, if not nan
make_plot : bool, optional
True/false flag for whether to plot the data
opts : class Opts, optional
Plotting options
    Returns
-------
inc_cost : (M) ndarray
incremental costs - see note 1
inc_qaly : (M) ndarray
incremental QALYs gained
icer_out : (M) ndarray
incremental cost effectiveness ratios
order : (N) ndarray
order mapping to the original inputs, with NaNs for dominated strategies
icer_data : (N) pandas dataframe
ICER data as a pandas dataframe
fig : (object) figure handle or None
Figure handle for any figure that was produced
Notes
-----
    #. M may be smaller than N due to dominated strategies being removed. The order variable
       will have (N - M) values set to NaN.
Examples
--------
>>> from dstauffman import icer
>>> cost = [250e3, 750e3, 2.25e6, 3.75e6]
>>> qaly = [20., 30, 40, 80]
>>> (inc_cost, inc_qaly, icer_out, order, icer_data, fig) = icer(cost, qaly)
>>> print(inc_cost) # doctest: +NORMALIZE_WHITESPACE
[ 250000. 500000. 3000000.]
>>> print(inc_qaly) # doctest: +NORMALIZE_WHITESPACE
[ 20. 10. 50.]
>>> print(icer_out) # doctest: +NORMALIZE_WHITESPACE
[ 12500. 50000. 60000.]
>>> print(order) # doctest: +NORMALIZE_WHITESPACE
[ 0. 1. nan 2.]
"""
# force inputs to be ndarrays
cost = np.atleast_1d(np.asarray(cost))
qaly = np.atleast_1d(np.asarray(qaly))
fig = None
# check inputs
assert np.all(cost > 0), 'Costs must be positive.'
assert np.all(qaly > 0), 'Qalys must be positive.'
assert cost.shape == qaly.shape, 'Cost and Qalys must have same size.'
assert cost.size > 0, 'Costs and Qalys cannot be empty.'
# alias the number of strategies
num = cost.size
# build an index order variable to keep track of strategies
keep = list(range(num))
# enter processing loop
while True:
# pull out current values based on evolving order mask
this_cost = cost[keep]
this_qaly = qaly[keep]
# sort by cost
ix_sort = np.argsort(this_cost)
sorted_cost = this_cost[ix_sort]
sorted_qaly = this_qaly[ix_sort]
# check for strongly dominated strategies
if not np.all(np.diff(sorted_qaly) >= 0):
            # find the first occurrence (increment by one to find the one less effective than the last)
bad = np.nonzero(np.diff(sorted_qaly) < 0)[0] + 1
if len(bad) == 0:
                raise ValueError('Index should never be empty, something unexpected happened.') # pragma: no cover
# update the mask and continue to next pass of while loop
keep.pop(ix_sort[bad[0]])
continue
# calculate incremental costs
inc_cost = np.hstack((sorted_cost[0], np.diff(sorted_cost)))
inc_qaly = np.hstack((sorted_qaly[0], np.diff(sorted_qaly)))
icer_out = inc_cost / inc_qaly
# check for weakly dominated strategies
if not np.all(np.diff(icer_out) >= 0):
            # find the first bad occurrence
bad = np.nonzero(np.diff(icer_out) < 0)[0]
if len(bad) == 0:
                raise ValueError('Index should never be empty, something unexpected happened.') # pragma: no cover
# update mask and continue to next pass
keep.pop(ix_sort[bad[0]])
continue
# if no continue statements were reached, then another iteration is not necessary, so break out
break
# save the final ordering
order = np.nan * np.ones(cost.shape)
order[keep] = ix_sort
# build an index to pull data out
temp = np.nonzero(~np.isnan(order))[0]
ix = temp[order[~np.isnan(order)].astype(int)]
# recalculate based on given baseline
if baseline is not None:
inc_cost = np.diff(np.hstack((cost[baseline], cost[ix])))
inc_qaly = np.diff(np.hstack((qaly[baseline], qaly[ix])))
icer_out = inc_cost / inc_qaly
# output as dataframe
# build a name list if not given
if names is None:
names = ['Strategy {}'.format(i+1) for i in range(num)]
# preallocate some variables
full_inc_costs = np.nan * np.ones((num))
full_inc_qalys = np.nan * np.ones((num))
full_icers = np.nan * np.ones((num))
# fill the calculations in where applicable
full_inc_costs[ix] = inc_cost
full_inc_qalys[ix] = inc_qaly
full_icers[ix] = icer_out
# make into dictionary with more explicit column names
data = OrderedDict()
data['Strategy'] = names
data['Cost'] = cost
data['QALYs'] = qaly
data['Increment_Costs'] = full_inc_costs
data['Incremental_QALYs'] = full_inc_qalys
data['ICER'] = full_icers
data['Order'] = order
# make the whole data set into a dataframe
icer_data = pd.DataFrame.from_dict(data)
icer_data.set_index('Strategy', inplace=True)
# Make a plot
if make_plot:
# check optional inputs
if opts is None:
opts = Opts()
# create a figure and axis
fig = plt.figure()
fig.canvas.set_window_title('Cost Benefit Frontier')
ax = fig.add_subplot(111)
# plot the data
ax.plot(qaly, cost, 'ko', label='strategies')
ax.plot(qaly[ix], cost[ix], 'r.', markersize=20, label='frontier')
# get axis limits before (0,0) point is added
lim = ax.axis()
# add ICER lines
if baseline is None:
ax.plot(np.hstack((0, qaly[ix])), np.hstack((0, cost[ix])), 'r-', label='ICERs')
else:
ax.plot(np.hstack((0, qaly[ix[0]])), np.hstack((0, cost[ix[0]])), 'r:')
ax.plot(np.hstack((qaly[baseline], qaly[ix])), np.hstack((cost[baseline], cost[ix])), 'r-', label='ICERs')
# Label each point
dy = (lim[3] - lim[2]) / 100
for i in range(num):
ax.annotate(names[i], xy=(qaly[i], cost[i]+dy), xycoords='data', horizontalalignment='center', \
verticalalignment='bottom', fontsize=12)
# add some labels and such
ax.set_title(fig.canvas.get_window_title())
ax.set_xlabel('Benefits')
ax.set_ylabel('Costs')
ax.legend(loc='upper left')
ax.grid(True)
# reset limits with including (0,0) point in case it skews everything too much
ax.axis(lim)
# add standard plotting features
setup_plots(fig, opts, 'dist_no_yscale')
return (inc_cost, inc_qaly, icer_out, order, icer_data, fig)
#%% Functions - bounded_normal_draw
def bounded_normal_draw(num, values, field, prng):
r"""
    Creates random draws from a normal distribution with the given mean and standard
    deviation, plus optional bounds, all taken from a dictionary with the specified
    `field` name.
Parameters
----------
num : int
Number of random draws to make
values : dict
Dictionary of mean, std, min and max values
field : str
Name of field that is prepended to the values
prng : class numpy.random.RandomState
Pseudo-random number generator
Returns
-------
out : ndarray (N,)
        Bounded normal random draws
Notes
-----
#. Written by David C. Stauffer in March 2017.
Examples
--------
>>> from dstauffman import bounded_normal_draw
>>> import numpy as np
>>> num = 10
>>> values = {'last_mean': 2, 'last_std': 0.5, 'last_min': 1, 'last_max': 3}
>>> field = 'last'
>>> prng = np.random.RandomState()
>>> out = bounded_normal_draw(num, values, field, prng)
"""
# get values from the dictionary
try:
this_mean = values[field + '_mean']
except KeyError:
this_mean = 0
try:
this_std = values[field + '_std']
except KeyError:
this_std = 1
try:
this_min = values[field + '_min']
except KeyError:
this_min = -np.inf
try:
this_max = values[field + '_max']
except KeyError:
this_max = np.inf
# calculate the normal distribution
if this_std == 0:
out = this_mean * np.ones(num)
else:
out = prng.normal(this_mean, this_std, size=num)
# enforce the min and maxes
np.minimum(out, this_max, out)
np.maximum(out, this_min, out)
return out
#%% Unit test
if __name__ == '__main__':
plt.ioff()
unittest.main(module='tests.test_stats', exit=False)
doctest.testmod(verbose=False)
| lgpl-3.0 |
marvinlenk/subsystem_entropy_epsplots | pyplot_eps/ent_eps.py | 1 | 6836 | import numpy as np
import os
from mpEntropy import mpSystem
import matplotlib as mpl
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
from scipy.signal import savgol_filter
from scipy.optimize import curve_fit
# This is a workaround until scipy fixes the issue
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Load sysVar
sysVar = mpSystem("../interact_0.ini", plotOnly=True)
# Create plot folder
pltfolder = "./epsplots/"
if not os.path.exists(pltfolder):
os.mkdir(pltfolder)
print("Plotting", end='')
mpl.use('Agg')
# minimum and maximum times to plot
min_time = 0
max_time = 3
inlay_min_time = 10
inlay_max_time = 100
inlay_log_min_time = 0
inlay_log_max_time = 3
# styles and stuff
avgstyle = 'dashed'
avgsize = 0.6
expectstyle = 'solid'
expectsize = 1
legend_size = 10
font_size = 10
# https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inches
golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = [fig_width, fig_height]
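# For a 246 pt column this works out to roughly 3.40 x 2.10 inches.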
# padding in units of fontsize
padding = 0.32
params = {
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'lines.linewidth': 1,
'figure.figsize': fig_size,
'legend.frameon': False,
'legend.loc': 'best',
'mathtext.default': 'rm' # see http://matplotlib.org/users/customizing.html
}
plt.rcParams['agg.path.chunksize'] = 0
plt.rcParams.update(params)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial']})
loavgpercent = sysVar.plotLoAvgPerc # percentage of time evolution to start averaging
loavgind = int(loavgpercent * sysVar.dataPoints) # index to start at when calculating average and stddev
loavgtime = np.round(loavgpercent * (sysVar.deltaT * sysVar.steps * sysVar.plotTimeScale), 2)
# stuff for averaging
if sysVar.boolPlotAverages:
print(' with averaging from Jt=%.2f' % loavgtime, end='')
fwidth = sysVar.plotSavgolFrame
ford = sysVar.plotSavgolOrder
ent_array = np.loadtxt('../data/entropy.txt')
# multiply step array with time scale
step_array = ent_array[:, 0] * sysVar.plotTimeScale
min_index = int(min_time / step_array[-1] * len(step_array))
max_index = int(max_time / step_array[-1] * len(step_array))
inlay_min_index = int(inlay_min_time / step_array[-1] * len(step_array))
inlay_max_index = int(inlay_max_time / step_array[-1] * len(step_array))
inlay_log_min_index = int(inlay_log_min_time / step_array[-1] * len(step_array))
inlay_log_max_index = int(inlay_log_max_time / step_array[-1] * len(step_array))
#### Complete system Entropy
if os.path.isfile('../data/total_entropy.txt'):
totent_array = np.loadtxt('../data/total_entropy.txt')
plt.plot(totent_array[min_index:max_index, 0] * sysVar.plotTimeScale, totent_array[min_index:max_index, 1] * 1e13,
linewidth=0.6, color='r')
plt.grid()
plt.xlabel(r'$J\,t$')
plt.ylabel(r'Total system entropy $/ 10^{-13}$')
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'entropy_total.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
### Subsystem Entropy
fldat = open(pltfolder + 'ent_fluctuation_N' + str(sysVar.N) + '.txt', 'w')
fldat.write('N_tot: %i\n' % sysVar.N)
avg = np.mean(ent_array[loavgind:, 1], dtype=np.float64)
stddev = np.std(ent_array[loavgind:, 1], dtype=np.float64)
fldat.write('ssent_average: %.16e\n' % avg)
fldat.write('ssent_stddev: %.16e\n' % stddev)
fldat.write('ssent_rel._fluctuation: %.16e\n' % (stddev / avg))
fldat.close()
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'entropy_subsystem.eps', format='eps', dpi=1000)
plt.clf()
# Subsystem entropy with logarithmic inlay
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
a = plt.axes([.5, .35, .4, .4])
plt.semilogy(step_array[inlay_log_min_index:inlay_log_max_index],
np.abs(avg - ent_array[inlay_log_min_index:inlay_log_max_index, 1]), color='r')
plt.ylabel(r'$|\,\overline{S}\textsubscript{sys} - S\textsubscript{sys}(t)|$')
plt.yticks([])
plt.savefig(pltfolder + 'entropy_subsystem_inlay_log.eps', format='eps', dpi=1000)
plt.clf()
# Subsystem entropy with inlay
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
a = plt.axes([.45, .35, .4, .4])
plt.plot(step_array[inlay_min_index:inlay_max_index], avg - ent_array[inlay_min_index:inlay_max_index, 1],
linewidth=0.2, color='r')
plt.ylabel(r'$\overline{S}\textsubscript{sys} - S\textsubscript{sys}(t)$')
a.yaxis.tick_right()
tmp_ticks = list(a.get_xticks())
tmp_ticks.pop(0)
if tmp_ticks[-1] >= inlay_max_time:
tmp_ticks.pop(-1)
a.set_xticks(tmp_ticks + [inlay_min_time])
plt.savefig(pltfolder + 'entropy_subsystem_inlay.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
# histogram of fluctuations
n, bins, patches = plt.hist(ent_array[loavgind:, 1] - avg, 51, normed=1, rwidth=0.8, align='mid')
(mu, sigma) = norm.fit(ent_array[loavgind:, 1] - avg)
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--')
mu_magnitude = np.floor(np.log10(np.abs(mu)))
mu /= np.power(10, mu_magnitude)
sigma_magnitude = np.floor(np.log10(sigma))
sigma /= np.power(10, sigma_magnitude)
plt.figtext(0.965, 0.80,
'$\mu = %.2f \cdot 10^{%i}$\n$\sigma = %.2f \cdot 10^{%i}$' % (mu, mu_magnitude, sigma, sigma_magnitude),
ha='right', va='bottom', multialignment="left")
plt.xlabel(r'$\Delta S_{sub}$')
plt.ylabel(r'PD')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'entropy_subsystem_fluctuations.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
print(" done!")
| bsd-2-clause |
Lawrence-Liu/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(2, 1000, 4).astype(int)
    features_range = np.linspace(2, 1000, 4).astype(int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
    ax = fig.add_subplot(111, projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
KarlTDebiec/myplotspec_sim | moldynplot/dataset/MDGXDataset.py | 2 | 2501 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.dataset.MDGXDataset.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Represents MDGX force field parameterization data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot.dataset")
import moldynplot.dataset
from IPython import embed
import h5py
import numpy as np
import pandas as pd
from ..myplotspec.Dataset import Dataset
from ..myplotspec import sformat, wiprint
################################### CLASSES ###################################
class MDGXDataset(Dataset):
"""
Represents MDGX force field parameterization data
"""
@classmethod
def get_cache_key(cls, infile, selections=None, *args, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset
cache.
.. todo:
- Verify that keyword arguments passed to pandas may be safely
converted to hashable tuple, and if they cannot throw a
warning and load dataset without memoization
"""
from os.path import expandvars
read_csv_kw = []
for key, value in kwargs.get("read_csv_kw", {}).items():
if isinstance(value, list):
value = tuple(value)
read_csv_kw.append((key, value))
        selections = tuple(selections) if selections is not None else None
        return (
            cls, expandvars(infile), selections, tuple(read_csv_kw))
def __init__(self, infile, selections=None, **kwargs):
"""
"""
from os.path import expandvars
# Load
super(MDGXDataset, self).__init__(infile=infile, **kwargs)
dataframe = self.dataframe
dataframe.index.name = "conformation"
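        # absolute deviation between the QM and MM energies for each conformation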
dataframe["error"] = np.abs(
dataframe["qm_energy"] - dataframe["mm_energy"])
selection_dataframes = []
if selections is not None:
for selection in selections:
selection_dataframes.append(dataframe[
dataframe["topology"].str.endswith("/" + selection)])
self.selections = selection_dataframes
#################################### MAIN #####################################
if __name__ == "__main__":
MDGXDataset.main()
| bsd-3-clause |
funbaker/astropy | examples/coordinates/plot_galactocentric-frame.py | 3 | 8006 | # -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
-------------------
*By: Adrian Price-Whelan*
*License: BSD*
-------------------
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <http://simbad.harvard.edu/simbad/>`_ database:
c1 = coord.ICRS(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s)
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
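##############################################################################
# The same information is available as a single object through the frame's
# ``velocity`` attribute, which returns the space motion as a
# `~astropy.coordinates.CartesianDifferential`:
print(gc1.velocity)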
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun must be specified as a
# `~astropy.coordinates.CartesianDifferential` instance, as in the example
# below. Note that, as with the positions, the Galactocentric frame is a
# right-handed system - the x-axis is positive towards the Galactic center, so
# ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = coord.CartesianDifferential([11.1, 244, 7.25]*u.km/u.s)
gc_frame = coord.Galactocentric(galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
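##############################################################################
# As a quick sanity check of the sign convention mentioned above, the
# Galactocentric radial velocity can be reconstructed from the Cartesian
# components by projecting the velocity onto the position vector (it is
# positive for a star moving away from the Galactic center):
v_radial = ((gc2.x * gc2.v_x + gc2.y * gc2.v_y + gc2.z * gc2.v_z) /
            np.sqrt(gc2.x**2 + gc2.y**2 + gc2.z**2))
print(v_radial.to(u.km/u.s))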
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
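# (with these numbers, vy and vz come out to roughly 242 km/s and 7.7 km/s)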
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11.1 km/s:
vx = 11.1 * u.km/u.s
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=coord.CartesianDifferential(vx, vy, vz),
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
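##############################################################################
# Since the Sgr A* proper motion encodes essentially the same solar motion as
# the Cartesian velocity vector used above, the two custom frames should give
# velocity components that agree to within a few km/s.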
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.Galactocentric(ring_rep)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel('$v_x$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
axes[1].set_ylabel('$v_y$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
fig.tight_layout()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig,ax = plt.subplots(1, 1, figsize=(8,6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(r'$\mu_l \, \cos b$ [{0}]'.format((u.mas/u.yr).to_string('latex_inline')))
ax.legend()
| bsd-3-clause |