# Source: basaks/PyKrige, pykrige/test.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Testing code.
Updated BSM March 2016
"""
import unittest
import os
import numpy as np
from itertools import product
from pykrige import kriging_tools as kt
from pykrige import core
from pykrige import variogram_models
from pykrige.ok import OrdinaryKriging
from pykrige.uk import UniversalKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk3d import UniversalKriging3D
from pykrige.compat import SKLEARN_INSTALLED
class TestPyKrige(unittest.TestCase):
def setUp(self):
self.test_data = np.genfromtxt(os.path.join(os.getcwd(), 'test_data/test_data.txt'))
self.ok_test_answer, self.ok_test_gridx, self.ok_test_gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test1_answer.asc'), footer=2)
self.uk_test_answer, self.uk_test_gridx, self.uk_test_gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test2_answer.asc'), footer=2)
self.simple_data = np.array([[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74]])
self.simple_gridx = np.arange(0.0, 6.0, 1.0)
self.simple_gridx_2 = np.arange(0.0, 5.5, 0.5)
self.simple_gridy = np.arange(0.0, 5.5, 0.5)
xi, yi = np.meshgrid(self.simple_gridx, self.simple_gridy)
self.mask = np.array(xi == yi)
self.simple_data_3d = np.array([[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7]])
self.simple_gridx_3d = np.arange(0.0, 0.6, 0.05)
self.simple_gridy_3d = np.arange(0.0, 0.6, 0.01)
self.simple_gridz_3d = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(self.simple_gridz_3d, self.simple_gridy_3d, self.simple_gridx_3d, indexing='ij')
self.mask_3d = np.array((xi == yi) & (yi == zi))
def test_core_adjust_for_anisotropy(self):
X = np.array([[1.0, 0.0, -1.0, 0.0],
[0.0, 1.0, 0.0, -1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0])
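# Expected mapping (implied by the asserted values): the 90-degree anisotropy angle
# rotates the points by -90 degrees ((1, 0) -> (0, -1), (0, 1) -> (1, 0)) and the
# scaling factor of 2 then stretches the rotated y coordinate, e.g. (1, 0) -> (0, -2).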
self.assertTrue(np.allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([-2.0, 0.0, 2.0, 0.0])))
def test_core_adjust_for_anisotropy_3d(self):
# this is a bad example, as the X matrix is symmetric
# and insensitive to transpositions
X = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0., 0., 0.], [2., 2.], [90., 0., 0.])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([1., 0., 0.])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([0., 0., 2.])))
self.assertTrue(np.allclose(X_adj[:, 2], np.array([0., -2., 0.])))
X_adj = core._adjust_for_anisotropy(X, [0., 0., 0.], [2., 2.], [0., 90., 0.])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([0., 0., -1.])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([0., 2., 0.])))
self.assertTrue(np.allclose(X_adj[:, 2], np.array([2., 0., 0.])))
X_adj = core._adjust_for_anisotropy(X, [0., 0., 0.], [2., 2.], [0., 0., 90.])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([0., 1., 0.])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([-2., 0., 0.])))
self.assertTrue(np.allclose(X_adj[:, 2], np.array([0., 0., 2.])))
def test_core_initialize_variogram_model(self):
# Note the variogram_function argument is not a string in real life...
self.assertRaises(ValueError, core.initialize_variogram_model, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], 'linear', [0.0], 'linear', 6, False,
'euclidean')
self.assertRaises(ValueError, core.initialize_variogram_model, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], 'spherical', [0.0], 'spherical', 6, False,
'euclidean')
x = np.array([1.0 + n/np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n/np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
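# The four points sit one unit apart along a diagonal with values 1, 2, 3, 4, so the
# standard semivariance estimator gamma(h) = sum((z_i - z_j)**2) / (2 * N(h)) gives
# gamma(1) = 1/2 = 0.5, gamma(2) = 4/2 = 2.0, gamma(3) = 9/2 = 4.5, as asserted below.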
lags, semivariance, variogram_model_parameters = core.initialize_variogram_model(x, y, z, 'linear',
[0.0, 0.0], 'linear',
6, False, 'euclidean')
self.assertTrue(np.allclose(lags, np.array([1.0, 2.0, 3.0])))
self.assertTrue(np.allclose(semivariance, np.array([0.5, 2.0, 4.5])))
def test_core_initialize_variogram_model_3d(self):
# Note the variogram_function argument is not a string in real life...
self.assertRaises(ValueError, core.initialize_variogram_model_3d, self.simple_data_3d[:, 0],
self.simple_data_3d[:, 1], self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
'linear', [0.0], 'linear', 6, False)
self.assertRaises(ValueError, core.initialize_variogram_model_3d, self.simple_data_3d[:, 0],
self.simple_data_3d[:, 1], self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
'spherical', [0.0], 'spherical', 6, False)
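# Same construction in 3D: points at (n, n, n) with values 1..4 are sqrt(3) apart, so the
# lags are sqrt(3), 2*sqrt(3), 3*sqrt(3) and the semivariances are again 0.5, 2.0, 4.5.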
lags, semivariance, variogram_model_parameters = core.initialize_variogram_model_3d(np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
'linear', [0.0, 0.0],
'linear', 3, False)
self.assertTrue(np.allclose(lags, np.array([np.sqrt(3.), 2.*np.sqrt(3.), 3.*np.sqrt(3.)])))
self.assertTrue(np.allclose(semivariance, np.array([0.5, 2.0, 4.5])))
def test_core_calculate_variogram_model(self):
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]),
'linear', variogram_models.linear_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([0.98, 1.05]), 0.01, 0.01))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]),
'linear', variogram_models.linear_variogram_model, True)
self.assertTrue(np.allclose(res, np.array([0.98, 1.05]), 0.01, 0.01))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 2.8284, 5.1962, 8.0]),
'power', variogram_models.power_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([1.0, 1.5, 0.0])))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 1.4142, 1.7321, 2.0]),
'power', variogram_models.power_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([1.0, 0.5, 0.0])))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.2642, 1.7293, 1.9004, 1.9634]),
'exponential', variogram_models.exponential_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.5769, 1.4872, 1.9065, 1.9914]),
'gaussian', variogram_models.gaussian_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001))
def test_core_krige(self):
# Example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.22],
[43.8, 24.6, 2.822]])
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (18.8, 67.9),
variogram_models.linear_variogram_model, [0.006, 0.1],
'euclidean')
self.assertAlmostEqual(z, 1.6364, 4)
self.assertAlmostEqual(ss, 0.4201, 4)
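# Kriging at one of the data locations should return the observed value
# with (essentially) zero estimation variance.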
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (43.8, 24.6),
variogram_models.linear_variogram_model, [0.006, 0.1],
'euclidean')
self.assertAlmostEqual(z, 2.822, 3)
self.assertAlmostEqual(ss, 0.0, 3)
def test_core_krige_3d(self):
# Adapted from example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.0, 1.22],
[43.8, 24.6, 1.0, 2.822]])
z, ss = core.krige_3d(data[:, 0], data[:, 1], data[:, 2], data[:, 3], (18.8, 67.9, 1.0),
variogram_models.linear_variogram_model, [0.006, 0.1])
self.assertAlmostEqual(z, 1.6364, 4)
self.assertAlmostEqual(ss, 0.4201, 4)
z, ss = core.krige_3d(data[:, 0], data[:, 1], data[:, 2], data[:, 3], (43.8, 24.6, 1.0),
variogram_models.linear_variogram_model, [0.006, 0.1])
self.assertAlmostEqual(z, 2.822, 3)
self.assertAlmostEqual(ss, 0.0, 3)
def test_ok(self):
# Test to compare OK results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='exponential', variogram_parameters=[500.0, 3000.0, 0.0])
z, ss = ok.execute('grid', self.ok_test_gridx, self.ok_test_gridy, backend='vectorized')
self.assertTrue(np.allclose(z, self.ok_test_answer))
z, ss = ok.execute('grid', self.ok_test_gridx, self.ok_test_gridy, backend='loop')
self.assertTrue(np.allclose(z, self.ok_test_answer))
def test_ok_update_variogram_model(self):
self.assertRaises(ValueError, OrdinaryKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], variogram_model='blurg')
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
self.assertRaises(ValueError, ok.update_variogram_model, 'blurg')
ok.update_variogram_model('power', anisotropy_scaling=3.0, anisotropy_angle=45.0)
self.assertFalse(variogram_model == ok.variogram_model)
self.assertFalse(variogram_parameters == ok.variogram_model_parameters)
self.assertFalse(anisotropy_scaling == ok.anisotropy_scaling)
self.assertFalse(anisotropy_angle == ok.anisotropy_angle)
def test_ok_execute(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
self.assertRaises(ValueError, ok.execute, 'blurg', self.simple_gridx, self.simple_gridy)
z, ss = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
z, ss = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
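# 'masked' style needs a mask matching the grid shape: omitting the mask raises IOError,
# a wrong-sized mask raises ValueError, and masked cells (the grid diagonal here, so
# [0, 0]) come back masked; a transposed mask should be handled equivalently.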
self.assertRaises(IOError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='vectorized')
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(IOError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='loop')
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(ValueError, ok.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='vectorized')
z, ss = ok.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='vectorized')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, ok.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='loop')
z, ss = ok.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='loop')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
def test_cython_ok(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z1, ss1 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
z2, ss2 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='C')
self.assertTrue(np.allclose(z1, z2))
self.assertTrue(np.allclose(ss1, ss2))
closest_points = 4
z1, ss1 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop',
n_closest_points=closest_points)
z2, ss2 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='C',
n_closest_points=closest_points)
self.assertTrue(np.allclose(z1, z2))
self.assertTrue(np.allclose(ss1, ss2))
def test_uk(self):
# Test to compare UK with linear drift to results from KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='exponential', variogram_parameters=[500.0, 3000.0, 0.0],
drift_terms=['regional_linear'])
z, ss = uk.execute('grid', self.uk_test_gridx, self.uk_test_gridy, backend='vectorized')
self.assertTrue(np.allclose(z, self.uk_test_answer))
z, ss = uk.execute('grid', self.uk_test_gridx, self.uk_test_gridy, backend='loop')
self.assertTrue(np.allclose(z, self.uk_test_answer))
def test_uk_update_variogram_model(self):
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], variogram_model='blurg')
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['external_Z'])
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['external_Z'], external_drift=np.array([0]))
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['point_log'])
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2])
variogram_model = uk.variogram_model
variogram_parameters = uk.variogram_model_parameters
anisotropy_scaling = uk.anisotropy_scaling
anisotropy_angle = uk.anisotropy_angle
self.assertRaises(ValueError, uk.update_variogram_model, 'blurg')
uk.update_variogram_model('power', anisotropy_scaling=3.0, anisotropy_angle=45.0)
self.assertFalse(variogram_model == uk.variogram_model)
self.assertFalse(variogram_parameters == uk.variogram_model_parameters)
self.assertFalse(anisotropy_scaling == uk.anisotropy_scaling)
self.assertFalse(anisotropy_angle == uk.anisotropy_angle)
def test_uk_calculate_data_point_zscalars(self):
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
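# The DEM is a plane whose value equals the x coordinate, so the external-drift
# values sampled at the data points (and along any grid row) should simply be
# the x coordinates themselves.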
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'])
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem, external_drift_x=dem_x,
external_drift_y=np.arange(0.0, 5.0, 1.0))
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem, external_drift_x=dem_x,
external_drift_y=dem_y)
self.assertTrue(np.allclose(uk.z_scalars, self.simple_data[:, 0]))
xi, yi = np.meshgrid(np.arange(0.0, 5.3, 0.1), self.simple_gridy)
self.assertRaises(ValueError, uk._calculate_data_point_zscalars, xi, yi)
xi, yi = np.meshgrid(np.arange(0.0, 5.0, 0.1), self.simple_gridy)
z_scalars = uk._calculate_data_point_zscalars(xi, yi)
self.assertTrue(np.allclose(z_scalars[0, :], np.arange(0.0, 5.0, 0.1)))
def test_uk_execute_single_point(self):
# Test data and answer from lecture notes by Nicolas Christou, UCLA Stats
data = np.array([[61.0, 139.0, 477.0],
[63.0, 140.0, 696.0],
[64.0, 129.0, 227.0],
[68.0, 128.0, 646.0],
[71.0, 140.0, 606.0],
[73.0, 141.0, 791.0],
[75.0, 128.0, 783.0]])
point = (65.0, 137.0)
z_answer = 567.54
ss_answer = 9.044
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model='exponential',
variogram_parameters=[10.0, 9.99, 0.0], drift_terms=['regional_linear'])
z, ss = uk.execute('points', np.array([point[0]]), np.array([point[1]]), backend='vectorized')
self.assertAlmostEqual(z_answer, z[0], places=0)
self.assertAlmostEqual(ss_answer, ss[0], places=0)
z, ss = uk.execute('points', np.array([61.0]), np.array([139.0]), backend='vectorized')
self.assertAlmostEqual(z[0], 477.0, 3)
self.assertAlmostEqual(ss[0], 0.0, 3)
z, ss = uk.execute('points', np.array([61.0]), np.array([139.0]), backend='loop')
self.assertAlmostEqual(z[0], 477.0, 3)
self.assertAlmostEqual(ss[0], 0.0, 3)
def test_uk_execute(self):
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
self.assertRaises(ValueError, uk.execute, 'blurg', self.simple_gridx, self.simple_gridy)
self.assertRaises(ValueError, uk.execute, 'grid', self.simple_gridx, self.simple_gridy, backend='mrow')
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
self.assertRaises(IOError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='vectorized')
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(IOError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='loop')
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(ValueError, uk.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='vectorized')
z, ss = uk.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='vectorized')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, uk.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='loop')
z, ss = uk.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='loop')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
def test_ok_uk_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
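# With no drift terms specified, universal kriging should reduce to ordinary
# kriging, so both must produce identical estimates and variances on this grid.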
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_ok, ss_ok = ok.execute('grid', gridx, gridy, backend='vectorized')
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_uk, ss_uk = uk.execute('grid', gridx, gridy, backend='vectorized')
self.assertTrue(np.allclose(z_ok, z_uk))
self.assertTrue(np.allclose(ss_ok, ss_uk))
z_ok, ss_ok = ok.execute('grid', gridx, gridy, backend='loop')
z_uk, ss_uk = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_ok, z_uk))
self.assertTrue(np.allclose(ss_ok, ss_uk))
def test_ok_backends_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_ok_v, ss_ok_v = ok.execute('grid', gridx, gridy, backend='vectorized')
z_ok_l, ss_ok_l = ok.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_ok_v, z_ok_l))
self.assertTrue(np.allclose(ss_ok_v, ss_ok_l))
def test_uk_backends_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_uk_v, ss_uk_v = uk.execute('grid', gridx, gridy, backend='vectorized')
z_uk_l, ss_uk_l = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_uk_v, z_uk_l))
self.assertTrue(np.allclose(ss_uk_v, ss_uk_l))
def test_kriging_tools(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z_write, ss_write = ok.execute('grid', self.simple_gridx, self.simple_gridy)
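# Round trip: write the kriged grid to an ASCII grid file, read it back, and
# check that the values and grid axes survive unchanged.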
kt.write_asc_grid(self.simple_gridx, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=1)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.allclose(z_write, z_read, 0.01, 0.01))
self.assertTrue(np.allclose(self.simple_gridx, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
z_write, ss_write = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask)
kt.write_asc_grid(self.simple_gridx, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=1)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.ma.allclose(z_write, np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True, rtol=0.01, atol=0.01))
self.assertTrue(np.allclose(self.simple_gridx, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z_write, ss_write = ok.execute('grid', self.simple_gridx_2, self.simple_gridy)
kt.write_asc_grid(self.simple_gridx_2, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=2)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.allclose(z_write, z_read, 0.01, 0.01))
self.assertTrue(np.allclose(self.simple_gridx_2, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
os.remove(os.path.join(os.getcwd(), 'test_data/temp.asc'))
def test_uk_three_primary_drifts(self):
well = np.array([[1.1, 1.1, -1.0]])
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'external_Z', 'point_log'],
point_drift=well, external_drift=dem, external_drift_x=dem_x, external_drift_y=dem_y)
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
self.assertEqual(z.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertEqual(ss.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertTrue(np.all(np.isfinite(z)))
self.assertFalse(np.all(np.isnan(z)))
self.assertTrue(np.all(np.isfinite(ss)))
self.assertFalse(np.all(np.isnan(ss)))
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
self.assertEqual(z.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertEqual(ss.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertTrue(np.all(np.isfinite(z)))
self.assertFalse(np.all(np.isnan(z)))
self.assertTrue(np.all(np.isfinite(ss)))
self.assertFalse(np.all(np.isnan(ss)))
def test_uk_specified_drift(self):
xg, yg = np.meshgrid(self.simple_gridx, self.simple_gridy)
well = np.array([[1.1, 1.1, -1.0]])
point_log = well[0, 2] * np.log(np.sqrt((xg - well[0, 0])**2. + (yg - well[0, 1])**2.)) * -1.
if np.any(np.isinf(point_log)):
point_log[np.isinf(point_log)] = -100. * well[0, 2] * -1.
point_log_data = well[0, 2] * np.log(np.sqrt((self.simple_data[:, 0] - well[0, 0])**2. +
(self.simple_data[:, 1] - well[0, 1])**2.)) * -1.
if np.any(np.isinf(point_log_data)):
point_log_data[np.isinf(point_log_data)] = -100. * well[0, 2] * -1.
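# point_log/point_log_data rebuild the point-logarithmic drift by hand (well strength
# times the log of distance to the well, sign-flipped as above, with infinities capped),
# so 'specified' drift fed these arrays should reproduce the built-in drift terms below.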
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'])
self.assertRaises(TypeError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'],
specified_drift=self.simple_data[:, 0])
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:2, 0]])
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:, 0], self.simple_data[:, 1]])
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[self.simple_gridx, self.simple_gridy])
self.assertRaises(TypeError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=self.simple_gridx)
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[xg])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy, specified_drift_arrays=[xg, yg])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[point_log_data])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[point_log])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['point_log'], point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:, 0], self.simple_data[:, 1], point_log_data])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[xg, yg, point_log])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'point_log'],
point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
def test_uk_functional_drift(self):
well = np.array([[1.1, 1.1, -1.0]])
func_x = lambda x, y: x
func_y = lambda x, y: y
func_well = lambda x, y: - well[0, 2] * np.log(np.sqrt((x - well[0, 0])**2. + (y - well[0, 1])**2.))
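# The functional drift terms duplicate the built-in ones: func_x/func_y correspond to
# the regional linear drift and func_well to the point-logarithmic drift, so each
# functional-drift model below should match its built-in counterpart.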
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['functional'])
self.assertRaises(TypeError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['functional'],
functional_drift=func_x)
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'], functional_drift=[func_well])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['point_log'], point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y, func_well])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'point_log'],
point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
def test_uk_with_external_drift(self):
dem, demx, demy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test3_dem.asc'))
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='spherical',
variogram_parameters=[500.0, 3000.0, 0.0],
anisotropy_scaling=1.0, anisotropy_angle=0.0,
drift_terms=['external_Z'], external_drift=dem,
external_drift_x=demx, external_drift_y=demy,
verbose=False)
answer, gridx, gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test3_answer.asc'))
z, ss = uk.execute('grid', gridx, gridy, backend='vectorized')
self.assertTrue(np.allclose(z, answer))
z, ss = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z, answer))
def test_force_exact(self):
data = np.array([[1., 1., 2.],
[2., 2., 1.5],
[3., 3., 1.]])
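# Kriging should be an exact interpolator: at the data locations the estimate
# equals the datum and the kriging variance is ~0, while away from the data
# the variance must remain strictly positive.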
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[1.0, 1.0])
z, ss = ok.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='vectorized')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = ok.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='vectorized')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = ok.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='vectorized')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = ok.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = ok.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = ok.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='vectorized',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = ok.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='loop')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = ok.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='loop')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = ok.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='loop')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = ok.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = ok.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = ok.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='loop',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
z, ss = uk.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='vectorized')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = uk.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='vectorized')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = uk.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='vectorized')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = uk.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = uk.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = uk.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='vectorized',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = uk.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='loop')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = uk.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='loop')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = uk.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='loop')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = uk.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = uk.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = uk.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='loop',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (1., 1.),
variogram_models.linear_variogram_model, [1.0, 1.0],
'euclidean')
self.assertAlmostEqual(z, 2.)
self.assertAlmostEqual(ss, 0.)
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (1., 2.),
variogram_models.linear_variogram_model, [1.0, 1.0],
'euclidean')
self.assertNotAlmostEqual(ss, 0.)
data = np.zeros((50, 3))
x, y = np.meshgrid(np.arange(0., 10., 1.), np.arange(0., 10., 2.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(x) * np.ravel(y)
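# The data are z = x * y sampled on a regular grid; kriging back onto exactly
# that grid should reproduce the data with ~zero variance, while a half-cell
# offset grid should not.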
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[100.0, 1.0])
z, ss = ok.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='vectorized')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='vectorized')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='loop')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='loop')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[100.0, 1.0])
z, ss = uk.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='vectorized')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='vectorized')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='loop')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='loop')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
def test_custom_variogram(self):
func = lambda params, dist: params[0] * np.log10(dist + params[1]) + params[2]
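# Worked value for the custom model: with parameters [1., 1., 1.] and a lag of 1,
# func gives 1 * log10(1 + 1) + 1 = 1.30103..., matching the 1.3010 asserted below.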
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='mrow')
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom')
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=0)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=func)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(uk.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear')
uk.update_variogram_model('custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(uk.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='mrow')
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom')
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=0)
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=func)
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(ok.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear')
ok.update_variogram_model('custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(ok.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
def test_ok3d(self):
# Test to compare K3D results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
k3d = OrdinaryKriging3D(self.test_data[:, 0], self.test_data[:, 1], np.zeros(self.test_data[:, 1].shape),
self.test_data[:, 2], variogram_model='exponential',
variogram_parameters=[500.0, 3000.0, 0.0])
k, ss = k3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='vectorized')
self.assertTrue(np.allclose(k, self.ok_test_answer))
k, ss = k3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='loop')
self.assertTrue(np.allclose(k, self.ok_test_answer))
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
ans = np.genfromtxt('./test_data/test3d_answer.txt')
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='vectorized')
self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop')
self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
def test_ok3d_moving_window(self):
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
ans = np.genfromtxt('./test_data/test3d_answer.txt')
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
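# A 10-point moving window with the loop backend should still match the full
# KT3D reference solution to within ~0.1%.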
k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop', n_closest_points=10)
self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
def test_ok3d_uk3d_and_backends_produce_same_results(self):
ok3d = OrdinaryKriging3D(self.test_data[:, 0], self.test_data[:, 1], np.zeros(self.test_data[:, 1].shape),
self.test_data[:, 2], variogram_model='exponential',
variogram_parameters=[500.0, 3000.0, 0.0])
ok_v, oss_v = ok3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='vectorized')
ok_l, oss_l = ok3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='loop')
uk3d = UniversalKriging3D(self.test_data[:, 0], self.test_data[:, 1], np.zeros(self.test_data[:, 1].shape),
self.test_data[:, 2], variogram_model='exponential',
variogram_parameters=[500., 3000., 0.])
uk_v, uss_v = uk3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='vectorized')
self.assertTrue(np.allclose(uk_v, ok_v))
uk_l, uss_l = uk3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='loop')
self.assertTrue(np.allclose(uk_l, ok_l))
self.assertTrue(np.allclose(uk_l, uk_v))
self.assertTrue(np.allclose(uss_l, uss_v))
data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
ok3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
ok_v, oss_v = ok3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='vectorized')
ok_l, oss_l = ok3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop')
uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
uk_v, uss_v = uk3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='vectorized')
self.assertTrue(np.allclose(uk_v, ok_v))
self.assertTrue(np.allclose(uss_v, oss_v))
uk_l, uss_l = uk3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop')
self.assertTrue(np.allclose(uk_l, ok_l))
self.assertTrue(np.allclose(uss_l, oss_l))
self.assertTrue(np.allclose(uk_l, uk_v))
self.assertTrue(np.allclose(uss_l, uss_v))
def test_ok3d_update_variogram_model(self):
self.assertRaises(ValueError, OrdinaryKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='blurg')
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
variogram_model = k3d.variogram_model
variogram_parameters = k3d.variogram_model_parameters
anisotropy_scaling_y = k3d.anisotropy_scaling_y
anisotropy_scaling_z = k3d.anisotropy_scaling_z
anisotropy_angle_x = k3d.anisotropy_angle_x
anisotropy_angle_y = k3d.anisotropy_angle_y
anisotropy_angle_z = k3d.anisotropy_angle_z
self.assertRaises(ValueError, k3d.update_variogram_model, 'blurg')
k3d.update_variogram_model('power', anisotropy_scaling_y=3.0, anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0, anisotropy_angle_y=45.0, anisotropy_angle_z=45.0)
self.assertFalse(variogram_model == k3d.variogram_model)
self.assertFalse(variogram_parameters == k3d.variogram_model_parameters)
self.assertFalse(anisotropy_scaling_y == k3d.anisotropy_scaling_y)
self.assertFalse(anisotropy_scaling_z == k3d.anisotropy_scaling_z)
self.assertFalse(anisotropy_angle_x == k3d.anisotropy_angle_x)
self.assertFalse(anisotropy_angle_y == k3d.anisotropy_angle_y)
self.assertFalse(anisotropy_angle_z == k3d.anisotropy_angle_z)
def test_uk3d_update_variogram_model(self):
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='blurg')
uk3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
variogram_model = uk3d.variogram_model
variogram_parameters = uk3d.variogram_model_parameters
anisotropy_scaling_y = uk3d.anisotropy_scaling_y
anisotropy_scaling_z = uk3d.anisotropy_scaling_z
anisotropy_angle_x = uk3d.anisotropy_angle_x
anisotropy_angle_y = uk3d.anisotropy_angle_y
anisotropy_angle_z = uk3d.anisotropy_angle_z
self.assertRaises(ValueError, uk3d.update_variogram_model, 'blurg')
uk3d.update_variogram_model('power', anisotropy_scaling_y=3.0, anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0, anisotropy_angle_y=45.0, anisotropy_angle_z=45.0)
self.assertFalse(variogram_model == uk3d.variogram_model)
self.assertFalse(variogram_parameters == uk3d.variogram_model_parameters)
self.assertFalse(anisotropy_scaling_y == uk3d.anisotropy_scaling_y)
self.assertFalse(anisotropy_scaling_z == uk3d.anisotropy_scaling_z)
self.assertFalse(anisotropy_angle_x == uk3d.anisotropy_angle_x)
self.assertFalse(anisotropy_angle_y == uk3d.anisotropy_angle_y)
self.assertFalse(anisotropy_angle_z == uk3d.anisotropy_angle_z)
def test_ok3d_backends_produce_same_result(self):
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear')
k_k3d_v, ss_k3d_v = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
backend='vectorized')
k_k3d_l, ss_k3d_l = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
backend='loop')
self.assertTrue(np.allclose(k_k3d_v, k_k3d_l))
self.assertTrue(np.allclose(ss_k3d_v, ss_k3d_l))
def test_ok3d_execute(self):
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
self.assertRaises(ValueError, k3d.execute, 'blurg', self.simple_gridx_3d,
self.simple_gridy_3d, self.simple_gridz_3d)
k, ss = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
k, ss = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
self.assertRaises(IOError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='vectorized')
k, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='vectorized')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(IOError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='loop')
k, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='loop')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(ValueError, k3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='vectorized')
k, ss = k3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='vectorized')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, k3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='loop')
k, ss = k3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='loop')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
data = np.zeros((125, 4))
z, y, x = np.meshgrid(np.arange(0., 5., 1.), np.arange(0., 5., 1.), np.arange(0., 5., 1.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
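# The data value equals the z coordinate, so kriging interior columns should
# recover ~0, 1, 2, 3 at the corresponding z levels.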
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='vectorized')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='loop')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='vectorized')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='loop')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
def test_uk3d_execute(self):
uk3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
self.assertRaises(ValueError, uk3d.execute, 'blurg', self.simple_gridx_3d,
self.simple_gridy_3d, self.simple_gridz_3d)
k, ss = uk3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
k, ss = uk3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
self.assertRaises(IOError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='vectorized')
k, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='vectorized')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(IOError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='loop')
k, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='loop')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(ValueError, uk3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='vectorized')
k, ss = uk3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='vectorized')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, uk3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='loop')
k, ss = uk3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='loop')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
data = np.zeros((125, 4))
z, y, x = np.meshgrid(np.arange(0., 5., 1.), np.arange(0., 5., 1.), np.arange(0., 5., 1.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='vectorized')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='loop')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='vectorized')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='loop')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
def test_force_exact_3d(self):
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='vectorized')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='loop')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
k3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='vectorized')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='loop')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
def test_uk3d_specified_drift(self):
zg, yg, xg = np.meshgrid(self.simple_gridz_3d, self.simple_gridy_3d, self.simple_gridx_3d, indexing='ij')
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
variogram_model='linear', drift_terms=['specified'])
self.assertRaises(TypeError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['specified'], specified_drift=self.simple_data_3d[:, 0])
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['specified'], specified_drift=[self.simple_data_3d[:2, 0]])
uk_spec = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2]])
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, specified_drift_arrays=[self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d])
self.assertRaises(TypeError, uk_spec.execute, 'grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, specified_drift_arrays=self.simple_gridx_3d)
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, specified_drift_arrays=[zg])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
specified_drift_arrays=[xg, yg, zg])
uk_lin = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
def test_uk3d_functional_drift(self):
func_x = lambda x, y, z: x
func_y = lambda x, y, z: y
func_z = lambda x, y, z: z
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
variogram_model='linear', drift_terms=['functional'])
self.assertRaises(TypeError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['functional'], functional_drift=func_x)
uk_func = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y, func_z])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d)
uk_lin = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
def test_geometric_code(self):
# Create selected points distributed across the sphere:
N=4
lon = np.array([7.0, 7.0, 187.0, 73.231])
lat = np.array([13.23, 13.2301, -13.23, -79.3])
# For the points generated with this reference seed, the distance matrix
# has been calculated using geopy (v. 1.11.0) as follows:
# >>> from geopy.distance import great_circle
# >>> g = great_circle(radius=1.0)
# >>> d = np.zeros((N,N), dtype=float)
# >>> for i in range(N):
# >>> for j in range(N):
# >>> d[i,j] = g.measure((lat[i],lon[i]),(lat[j],lon[j]))
# >>> d *= 180.0/np.pi
# From that distance matrix, the reference values have been obtained.
d_ref = np.array(
[[0.0, 1e-4, 180.0, 98.744848317171801],
[1e-4, 0.0, 179.9999, 98.744946828324345],
[180.0, 179.9999, 0.0, 81.255151682828213],
[98.744848317171801, 98.744946828324345, 81.255151682828213, 0.0]]
)
# Calculate distance matrix using the PyKrige code:
d = np.zeros((N,N))
for i in range(N):
for j in range(N):
d[i,j] = core.great_circle_distance(lon[i],lat[i],lon[j],lat[j])
        # Test against reference values:
np.testing.assert_allclose(d, d_ref)
# Test general features:
np.testing.assert_allclose(d[np.eye(N,dtype=bool)], 0.0)
np.testing.assert_equal(d>=0.0, np.ones((N,N),dtype=bool))
np.testing.assert_allclose(d,d.T)
np.testing.assert_equal(d<=180.0,np.ones((N,N),dtype=bool))
# Test great_circle_distance and euclid3_to_great_circle against each other:
lon_ref = lon
lat_ref = lat
for i in range(len(lon_ref)):
lon, lat = np.meshgrid(np.linspace(0, 360.0, 20),
np.linspace(-90.0, 90.0, 20))
dx = np.cos(np.pi/180.0*lon)*np.cos(np.pi/180.0*lat)- \
np.cos(np.pi/180.0*lon_ref[i])*np.cos(np.pi/180.0*lat_ref[i])
dy = np.sin(np.pi/180.0*lon)*np.cos(np.pi/180.0*lat)- \
np.sin(np.pi/180.0*lon_ref[i])*np.cos(np.pi/180.0*lat_ref[i])
dz = np.sin(np.pi/180.0*lat) - np.sin(np.pi/180.0*lat_ref[i])
np.testing.assert_allclose(core.great_circle_distance(lon_ref[i], lat_ref[i], lon, lat),
core.euclid3_to_great_circle(np.sqrt(dx**2+dy**2+dz**2)), rtol=1e-5)
def test_ok_geometric(self):
# Generate random data:
np.random.seed(89239413)
lon = 360.0*np.random.rand(50, 1)
lat = 180.0*np.random.rand(50, 1) - 90.0
z = np.random.rand(50, 1)
#data = np.concatenate((lon, lat, z), 1)
# Generate grid:
grid_lon = 360.0*np.random.rand(120, 1)
grid_lat = 180.0*np.random.rand(120, 1) - 90.0
# Create ordinary kriging object:
OK = OrdinaryKriging(lon, lat, z, variogram_model='linear', verbose=False,
enable_plotting=False, coordinates_type='geographic')
# Execute on grid:
z, ss = OK.execute('grid', grid_lon, grid_lat)
@unittest.skipUnless(SKLEARN_INSTALLED, "scikit-learn not installed")
class TestKrige(unittest.TestCase):
@staticmethod
    def method_and_variogram():
method = ['ordinary', 'universal', 'ordinary3d', 'universal3d']
variogram_model = ['linear', 'power', 'gaussian', 'spherical',
'exponential']
return product(method, variogram_model)
def test_krige(self):
from pykrige.rk import Krige
from pykrige.rk import threed_krige
from pykrige.compat import GridSearchCV
# dummy data
np.random.seed(1)
X = np.random.randint(0, 400, size=(20, 3)).astype(float)
y = 5 * np.random.rand(20)
        for m, v in self.method_and_variogram():
param_dict = {'method': [m], 'variogram_model': [v]}
estimator = GridSearchCV(Krige(),
param_dict,
n_jobs=-1,
iid=False,
pre_dispatch='2*n_jobs',
verbose=False,
cv=5,
)
# run the gridsearch
if m in ['ordinary', 'universal']:
estimator.fit(X=X[:, :2], y=y)
else:
estimator.fit(X=X, y=y)
if hasattr(estimator, 'best_score_'):
if m in threed_krige:
assert estimator.best_score_ > -10.0
else:
assert estimator.best_score_ > -3.0
if hasattr(estimator, 'cv_results_'):
assert estimator.cv_results_['mean_train_score'] > 0
@unittest.skipUnless(SKLEARN_INSTALLED, "scikit-learn not installed")
class TestRegressionKrige(unittest.TestCase):
@staticmethod
def methods():
from sklearn.svm import SVR
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
krige_methods = ['ordinary', 'universal']
ml_methods = [SVR(C=0.01),
RandomForestRegressor(min_samples_split=5,
n_estimators=50),
LinearRegression(),
Lasso(),
ElasticNet()
]
return product(ml_methods, krige_methods)
def test_krige(self):
from pykrige.rk import RegressionKriging
from pykrige.compat import train_test_split
from itertools import product
np.random.seed(1)
x = np.linspace(-1., 1., 100)
# create a feature matrix with 5 features
X = np.tile(x, reps=(5, 1)).T
y = 1 + 5*X[:, 0] - 2*X[:, 1] - 2*X[:, 2] + 3*X[:, 3] + 4*X[:, 4] + \
2*(np.random.rand(100) - 0.5)
# create lat/lon array
lon = np.linspace(-180., 180.0, 10)
lat = np.linspace(-90., 90., 10)
lon_lat = np.array(list(product(lon, lat)))
X_train, X_test, y_train, y_test, lon_lat_train, lon_lat_test = \
train_test_split(X, y, lon_lat, train_size=0.7, random_state=10)
for ml_model, krige_method in self.methods():
reg_kr_model = RegressionKriging(regression_model=ml_model,
method=krige_method,
n_closest_points=2)
reg_kr_model.fit(X_train, lon_lat_train, y_train)
assert reg_kr_model.score(X_test, lon_lat_test, y_test) > 0.25
def test_krige_housing(self):
from pykrige.rk import RegressionKriging
from pykrige.compat import train_test_split
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
# take only first 1000
p = housing['data'][:1000, :-2]
x = housing['data'][:1000, -2:]
target = housing['target'][:1000]
p_train, p_test, y_train, y_test, x_train, x_test = \
train_test_split(p, target, x, train_size=0.7,
random_state=10)
for ml_model, krige_method in self.methods():
reg_kr_model = RegressionKriging(regression_model=ml_model,
method=krige_method,
n_closest_points=2)
reg_kr_model.fit(p_train, x_train, y_train)
if krige_method == 'ordinary':
assert reg_kr_model.score(p_test, x_test, y_test) > 0.5
else:
assert reg_kr_model.score(p_test, x_test, y_test) > 0.0
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ver228/tierpsy-tracker | tierpsy/features/tierpsy_features/path.py | 1 | 13169 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 29 15:54:00 2017
@author: ajaver
"""
import numpy as np
import cv2
import pandas as pd
from scipy.interpolate import interp1d
from .curvatures import curvature_grad
from .postures import get_length
from .helper import DataPartition, get_n_worms_estimate
path_curvature_columns = ['path_curvature_body',
'path_curvature_tail',
'path_curvature_midbody',
'path_curvature_head'
]
path_curvature_columns_aux = ['coord_x_body', 'coord_y_body',
'coord_x_tail', 'coord_y_tail',
'coord_x_midbody', 'coord_y_midbody',
'coord_x_head', 'coord_y_head'
]
DFLT_ARGS = dict(
path_step = 11,
path_grad_window = 5,
clip_val_body_lengths = 20,
bin_size_microns = 250,
bin_size_body_lengths = 0.25
)
#%%
def _h_path_curvature(skeletons,
body_length = None,
partition_str = 'body',
path_step = DFLT_ARGS['path_step'],
path_grad_window = DFLT_ARGS['path_grad_window'],
_is_debug = False):
if body_length is None:
        #calculate the length if it is not given
body_length = np.nanmedian(get_length(skeletons))
#clip_val = clip_val_body_lengths/body_length
p_obj = DataPartition(n_segments=skeletons.shape[1])
body_coords = p_obj.apply(skeletons, partition_str, func=np.mean)
xx = body_coords[:,0]
yy = body_coords[:,1]
tt = np.arange(body_coords.shape[0])
#empty array return
if body_coords.size == 0 or np.all(np.isnan(body_coords)):
return np.full_like(tt, np.nan), body_coords
#interpolate nan values
good = ~np.isnan(xx)
x_i = xx[good]
y_i = yy[good]
t_i = tt[good]
t_i = np.hstack([-1, t_i, body_coords.shape[0]])
x_i = np.hstack([x_i[0], x_i, x_i[-1]])
y_i = np.hstack([y_i[0], y_i, y_i[-1]])
fx = interp1d(t_i, x_i)
fy = interp1d(t_i, y_i)
xx_i = fx(tt)
yy_i = fy(tt)
# calculate the cumulative length for each segment in the curve
dx = np.diff(xx_i)
dy = np.diff(yy_i)
dr = np.sqrt(dx * dx + dy * dy)
lengths = np.cumsum(dr)
lengths = np.hstack((0, lengths))
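    # re-parameterize the path by arc length and resample it at uniform steps of path_step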
fx = interp1d(lengths, xx_i)
fy = interp1d(lengths, yy_i)
ft = interp1d(lengths, tt)
sub_lengths = np.arange(lengths[0], lengths[-1], path_step)
#there is not enough data to calculate the curvature
if len(sub_lengths) <= 4*path_grad_window:
return np.full(skeletons.shape[0], np.nan), body_coords
xs = fx(sub_lengths)
ys = fy(sub_lengths)
ts = ft(sub_lengths)
curve = np.vstack((xs, ys)).T
curvature_r = curvature_grad(curve,
points_window = path_grad_window,
axis=0,
is_nan_border=False)
    #clip values to remove regions with extremely large curvatures (typically short reversals)
#curvature_r = np.clip(curvature_r, -clip_val,clip_val)
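    # interpolate the curvature (computed at the resampled path positions) back onto the original time index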
ts_i = np.hstack((-1, ts, tt[-1] + 1))
c_i = np.hstack((curvature_r[0], curvature_r, curvature_r[-1]))
curvature_t = interp1d(ts_i, c_i)(tt)
if _is_debug:
import matplotlib.pylab as plt
from matplotlib.collections import LineCollection
#path_curvature[np.isnan(worm_features['speed'])] = np.nan
#path_curvature = np.clip(curvature_t, -0.02, 0.02)
path_curvature = curvature_t
curv_range = (np.nanmin(path_curvature), np.nanmax(path_curvature))
points = body_coords.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments,
cmap = plt.get_cmap('plasma'),
norm = plt.Normalize(*curv_range))
lc.set_array(path_curvature)
lc.set_linewidth(2)
plt.figure(figsize=(20, 5))
plt.subplot(1,2,1)
plt.gca().add_collection(lc)
plt.xlim(3000, 11000)
plt.ylim(3000, 11000)
plt.axis('equal')
plt.subplot(1,2,2)
plt.plot(path_curvature)
return curvature_t, body_coords
def get_path_curvatures(skeletons, **argkws):
path_curvatures = []
path_coords = []
body_length = np.nanmedian(get_length(skeletons))
for partition_str in ['body', 'tail', 'midbody', 'head']:
path_curv, coords = \
_h_path_curvature(skeletons,
body_length,
partition_str = partition_str,
**argkws
)
path_curvatures.append(('path_curvature_' + partition_str, path_curv))
path_coords.append(('coord_x_' + partition_str, coords[...,0]))
path_coords.append(('coord_y_' + partition_str, coords[...,1]))
cols, dat = zip(*path_curvatures)
path_curvatures_df = pd.DataFrame(np.array(dat).T, columns=cols)
cols, dat = zip(*path_coords)
path_coords_df = pd.DataFrame(np.array(dat).T, columns=cols)
return path_curvatures_df, path_coords_df
def _test_plot_cnts_maps(ventral_contour, dorsal_contour):
import matplotlib.pylab as plt
pix2microns = 10
x_min = np.nanmin(ventral_contour[:, :, 0])
x_max = np.nanmax(ventral_contour[:, :, 0])
y_min = np.nanmin(dorsal_contour[:, :, 1])
y_max = np.nanmax(dorsal_contour[:, :, 1])
rx = int(round((x_max - x_min)/pix2microns))
ry = int(round((y_max - y_min)/pix2microns))
size_counts = (rx + 1, ry + 1)
partitions_dflt = {'head': (0, 8),
'neck': (8, 16),
'midbody': (16, 33),
'hips': (33, 41),
'tail': (41, 49),
'all': (0, 49),
'body': (8, 41)
}
all_cnts = {}
for part, rr in partitions_dflt.items():
p_vc = ventral_contour[:, rr[0]:rr[1], :].astype(np.float32)
p_dc = dorsal_contour[:, rr[0]:rr[1], :].astype(np.float32)
h = np.hstack((p_vc[:, ], p_dc[:, ::-1, :], p_vc[:, 0, :][:, None, :]))
cnts = [np.round((x-np.array((x_min, y_min))[None, :])/pix2microns) for x in h]
counts = np.zeros(size_counts, np.float32)
for ii, cnt in enumerate(cnts):
if np.any(np.isnan(cnt)):
continue
cc = np.zeros(size_counts, np.float32)
cc = cv2.drawContours(cc, [cnt[:, None, :].astype(np.int)], contourIdx=-1, thickness=-1, color=1)
counts += cc
plt.figure()
plt.imshow(counts, interpolation='none')
plt.title(part)
all_cnts[part] = counts
print(part)
#%%
def _get_path_coverage_feats(timeseries_data, bin_size_microns):
#find the columns that correspond to curvature_coords
cols = [x for x in timeseries_data if x in path_curvature_columns_aux]
path_coords_df = timeseries_data[cols]
bin_vals = ((path_coords_df - path_coords_df.mean())/bin_size_microns).round()
try:
bin_vals = bin_vals.fillna(method='ffill').fillna(method='bfill').astype(np.int)
except ValueError:
        #likely full of nans, return empty
return {}
path_coverage_feats = {}
# loop over worm body parts
for b_part in set(x.rpartition('_')[-1] for x in bin_vals.columns):
# get the binned coordinates of the given body part
# (the coordinates defining in which square of the grid the worm body part is)
dat = bin_vals[['coord_x_' + b_part,'coord_y_' + b_part]]
dat.columns = ['X', 'Y']
# groupby individual grid squares
gg = dat.groupby(["X", "Y"])
        # count the number of times any worm occupies each grid square throughout the video
grid_counts = gg.size().reset_index(name="Counts")
#cc = pd.crosstab(dat['X'], dat['Y'])
        # now assign a grid-square label to each (worm_index, timestamp) pair
ind_bins = np.full(dat.shape[0], -1)
for ii, (k, vals) in enumerate(gg):
ind_bins[vals.index] = ii
df = timeseries_data[['worm_index']].copy()
df['ind_bins'] = ind_bins
        # now measure how long each worm spends in each grid square
grid_durations = []
for w, vec in df.groupby('worm_index'):
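            # run-length encode the visited grid squares: each entry is the number
            # of consecutive frames spent in a grid square before leaving it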
xx = vec['ind_bins'].values
xr = np.insert(xx[1:], xx.size-1, -1)
b_flags = xr!=xx
#b_id = xx[b_flags]
b_s = np.diff(np.insert(np.where(b_flags)[0], 0, -1))
grid_durations.append(b_s)
if grid_durations:
grid_durations = np.concatenate(grid_durations)
else:
grid_durations = np.zeros(0)
path_coverage_feats[b_part] = (grid_counts, grid_durations)
return path_coverage_feats
def get_path_extent_stats(timeseries_data, fps, is_normalized = False):
if is_normalized:
body_length = timeseries_data['length'].median()
bin_size_microns = DFLT_ARGS['bin_size_body_lengths']*body_length
area_per_grid = 1
is_norm_str = '_norm'
else:
bin_size_microns = DFLT_ARGS['bin_size_microns']
is_norm_str = ''
area_per_grid = bin_size_microns**2
path_coverage_feats = _get_path_coverage_feats(timeseries_data, bin_size_microns)
Q = [50, 95]
grid_stats = []
for b_part, (grid_counts, grid_durations) in path_coverage_feats.items():
if grid_durations.size > 0:
grid_transit_time = np.percentile(grid_durations, Q)/fps
else:
grid_transit_time = (np.nan, np.nan)
if grid_counts['Counts'].size > 0:
path_coverage = grid_counts['Counts'].size*area_per_grid
path_density = np.percentile(grid_counts['Counts'], Q)/grid_counts['Counts'].sum()
else:
path_coverage = np.nan
path_density = (np.nan, np.nan)
posfix = b_part + is_norm_str
grid_stats += [
(path_coverage, 'path_coverage_' + posfix),
(path_density[0], 'path_density_{}_{}th'.format(posfix, Q[0])),
(path_density[1], 'path_density_{}_{}th'.format(posfix, Q[1])),
(grid_transit_time[0], 'path_transit_time_{}_{}th'.format(posfix, Q[0])),
(grid_transit_time[1], 'path_transit_time_{}_{}th'.format(posfix, Q[1])),
]
grid_stats_s = pd.Series(*list(zip(*grid_stats)))
return grid_stats_s
#%%
if __name__ == '__main__':
import os
import tables
#%%
#_test_plot_cnts_maps(ventral_contour, dorsal_contour)
base_dir = '/Users/ajaver/OneDrive - Imperial College London/tierpsy_features/test_data/multiworm'
skeletons_file = os.path.join(base_dir, 'MY23_worms5_food1-10_Set4_Pos5_Ch4_29062017_140148_skeletons.hdf5')
features_file = skeletons_file.replace('_skeletons.hdf5', '_featuresN.hdf5')
#features_file = '/Users/ajaver/OneDrive - Imperial College London/tierpsy_features/test_data/multiworm/MY16_worms5_food1-10_Set5_Pos4_Ch1_02062017_131004_featuresN.hdf5'
features_file = '/Users/ajaver/OneDrive - Imperial College London/tierpsy_features/test_data/multiworm/170817_matdeve_exp7co1_12_Set0_Pos0_Ch1_17082017_140001_featuresN.hdf5'
with pd.HDFStore(features_file, 'r') as fid:
blob_features = fid['/blob_features']
trajectories_data = fid['/trajectories_data']
timeseries_data = fid['/timeseries_data']
fps = fid.get_storer('/trajectories_data').attrs['fps']
good = trajectories_data['skeleton_id']>=0
trajectories_data = trajectories_data[good]
blob_features = blob_features[good]
if False:
trajectories_data_g = trajectories_data.groupby('worm_index_joined')
for worm_index in trajectories_data_g.groups.keys():
worm_index = 4#695
worm_data = trajectories_data_g.get_group(worm_index)
skel_id = worm_data['skeleton_id'].values
with tables.File(features_file, 'r') as fid:
skeletons = fid.get_node('/coordinates/skeletons')[skel_id, :, :]
worm_features = timeseries_data.loc[skel_id]
path_curvatures_df, path_coords_df = get_path_curvatures(skeletons, _is_debug=True)
break
#%%
    get_path_extent_stats(timeseries_data, fps)
| mit |
jstoxrocky/statsmodels | statsmodels/sandbox/regression/tests/test_gmm.py | 8 | 24983 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 04 13:19:01 2013
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lmap
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from statsmodels import iolib
from statsmodels.tools.tools import add_constant
from statsmodels.regression.linear_model import OLS
import statsmodels.sandbox.regression.gmm as gmm
def get_griliches76_data():
import os
curdir = os.path.split(__file__)[0]
path = os.path.join(curdir, 'griliches76.dta')
griliches76_data = iolib.genfromdta(path, missing_flt=np.NaN, pandas=True)
# create year dummies
years = griliches76_data['year'].unique()
N = griliches76_data.shape[0]
for yr in years:
griliches76_data['D_%i' %yr] = np.zeros(N)
for i in range(N):
if griliches76_data.ix[i, 'year'] == yr:
griliches76_data.ix[i, 'D_%i' %yr] = 1
else:
pass
griliches76_data['const'] = 1
X = add_constant(griliches76_data[['s', 'iq', 'expr', 'tenure', 'rns',
'smsa', 'D_67', 'D_68', 'D_69', 'D_70',
'D_71', 'D_73']],
prepend=True) # for R comparison
#prepend=False) # for Stata comparison
Z = add_constant(griliches76_data[['expr', 'tenure', 'rns', 'smsa', \
'D_67', 'D_68', 'D_69', 'D_70', 'D_71',
'D_73', 'med', 'kww', 'age', 'mrt']])
Y = griliches76_data['lw']
return Y, X, Z
# use module global to load only once
yg_df, xg_df, zg_df = get_griliches76_data()
endog = np.asarray(yg_df, dtype=float) # TODO: why is yg_df float32
exog, instrument = lmap(np.asarray, [xg_df, zg_df])
assert exog.dtype == np.float64
assert instrument.dtype == np.float64
# from R
#-----------------
varnames = np.array(["(Intercept)", "s", "iq", "expr", "tenure", "rns", "smsa", "D_67", "D_68", "D_69", "D_70",
"D_71", "D_73"])
params = np.array([ 4.03350989, 0.17242531, -0.00909883, 0.04928949, 0.04221709,
-0.10179345, 0.12611095, -0.05961711, 0.04867956, 0.15281763,
0.17443605, 0.09166597, 0.09323977])
bse = np.array([ 0.31816162, 0.02091823, 0.00474527, 0.00822543, 0.00891969,
0.03447337, 0.03119615, 0.05577582, 0.05246796, 0.05201092,
0.06027671, 0.05461436, 0.05767865])
tvalues = np.array([ 12.6775501, 8.2428242, -1.9174531, 5.9923305, 4.7330205,
-2.9528144, 4.0425165, -1.0688701, 0.9277959, 2.9381834,
2.8939212, 1.6784225, 1.6165385])
pvalues = np.array([ 1.72360000e-33, 7.57025400e-16, 5.55625000e-02,
3.21996700e-09, 2.64739100e-06, 3.24794100e-03,
5.83809900e-05, 2.85474400e-01, 3.53813900e-01,
3.40336100e-03, 3.91575100e-03, 9.36840200e-02,
1.06401300e-01])
#-----------------
def test_iv2sls_r():
mod = gmm.IV2SLS(endog, exog, instrument)
res = mod.fit()
# print(res.params)
# print(res.params - params)
n, k = exog.shape
assert_allclose(res.params, params, rtol=1e-7, atol=1e-9)
# TODO: check df correction
#assert_allclose(res.bse * np.sqrt((n - k) / (n - k - 1.)), bse,
assert_allclose(res.bse, bse, rtol=0, atol=3e-7)
def test_ivgmm0_r():
n, k = exog.shape
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(np.ones(exog.shape[1], float), maxiter=0, inv_weights=w0inv,
optim_method='bfgs',
optim_args={'gtol':1e-8, 'disp': 0})
assert_allclose(res.params, params, rtol=1e-4, atol=1e-4)
# TODO : res.bse and bse are not the same, rtol=0.09 is large in this case
#res.bse is still robust?, bse is not a sandwich ?
assert_allclose(res.bse, bse, rtol=0.09, atol=0)
score = res.model.score(res.params, w0)
assert_allclose(score, np.zeros(score.shape), rtol=0, atol=5e-6) # atol=1e-8) ??
def test_ivgmm1_stata():
# copied constant to the beginning
params_stata = np.array(
[ 4.0335099 , 0.17242531, -0.00909883, 0.04928949, 0.04221709,
-0.10179345, 0.12611095, -0.05961711, 0.04867956, 0.15281763,
0.17443605, 0.09166597, 0.09323976])
# robust bse with gmm onestep
bse_stata = np.array(
[ 0.33503289, 0.02073947, 0.00488624, 0.0080498 , 0.00946363,
0.03371053, 0.03081138, 0.05171372, 0.04981322, 0.0479285 ,
0.06112515, 0.0554618 , 0.06084901])
n, k = exog.shape
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
w0 = np.linalg.inv(w0inv)
start = OLS(endog, exog).fit().params
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv, optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
# move constant to end for Stata
idx = lrange(len(params))
idx = idx[1:] + idx[:1]
exog_st = exog[:, idx]
class TestGMMOLS(object):
@classmethod
def setup_class(self):
exog = exog_st # with const at end
res_ols = OLS(endog, exog).fit()
# use exog as instrument
nobs, k_instr = exog.shape
w0inv = np.dot(exog.T, exog) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, exog)
res = mod.fit(np.ones(exog.shape[1], float), maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
self.res2 = res_ols
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=5e-4, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=1e-5)
n = res1.model.exog.shape[0]
dffac = 1#np.sqrt((n - 1.) / n) # currently different df in cov calculation
assert_allclose(res1.bse * dffac, res2.HC0_se, rtol=5e-6, atol=0)
assert_allclose(res1.bse * dffac, res2.HC0_se, rtol=0, atol=1e-7)
def test_other(self):
res1, res2 = self.res1, self.res2
class CheckGMM(object):
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
n = res1.model.exog.shape[0]
dffac = 1 #np.sqrt((n - 1.) / n) # currently different df in cov calculation
rtol, atol = self.bse_tol
assert_allclose(res1.bse * dffac, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse * dffac, res2.bse, rtol=0, atol=atol)
#skip temporarily
def _est_other(self):
res1, res2 = self.res1, self.res2
assert_allclose(res1.q, res2.Q, rtol=5e-6, atol=0)
assert_allclose(res1.jval, res2.J, rtol=5e-5, atol=0)
class TestGMMSt1(CheckGMM):
@classmethod
def setup_class(self):
#self.bse_tol = [5e-7, 5e-7]
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=10, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
self.res1 = res10
from .results_gmm_griliches_iter import results
self.res2 = results
class TestGMMStTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
self.res1 = res10
from .results_gmm_griliches import results_twostep as results
self.res2 = results
class TestGMMStTwostepNO(CheckGMM):
#with Stata default `has_optimal_weights=False`
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [1e-6, 5e-5]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res10
from .results_gmm_griliches import results_twostep as results
self.res2 = results
class TestGMMStOnestep(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
self.params_tol = [5e-4, 5e-5]
self.bse_tol = [7e-3, 5e-4]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs',
optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_bse_other(self):
res1, res2 = self.res1, self.res2
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=False))))
#weights=res1.weights))))
# TODO: doesn't look different
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
#nobs = instrument.shape[0]
#w0inv = np.dot(instrument.T, instrument) / nobs
q = self.res1.model.gmmobjective(self.res1.params, np.linalg.inv(self.res1.weights))
#assert_allclose(q, res2.Q, rtol=5e-6, atol=0)
class TestGMMStOnestepNO(CheckGMM):
    # matches Stata's defaults wargs={'centered':False}, has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
self.params_tol = [1e-5, 1e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
class TestGMMStOneiter(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-4, 5e-5]
self.bse_tol = [7e-3, 5e-4]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_bse_other(self):
res1, res2 = self.res1, self.res2
moms = res1.model.momcond(res1.params)
w = res1.model.calc_weightmatrix(moms)
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=False,
weights=res1.weights))))
# TODO: doesn't look different
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=False,
#use_weights=True #weights=w
))))
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
#This doesn't replicate Stata oneway either
nobs = instrument.shape[0]
w0inv = np.dot(instrument.T, instrument) / nobs
q = self.res1.model.gmmobjective(self.res1.params, w)#self.res1.weights)
#assert_allclose(q, res2.Q, rtol=5e-6, atol=0)
class TestGMMStOneiterNO(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [1e-5, 1e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
#------------ Crosscheck subclasses
class TestGMMStOneiterNO_Linear(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-9, 1e-9]
self.bse_tol = [5e-10, 1e-10]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.LinearIVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res3 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
class TestGMMStOneiterNO_Nonlinear(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [5e-6, 1e-1]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
def func(params, exog):
return np.dot(exog, params)
mod = gmm.NonlinearIVGMM(endog, exog, instrument, func)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res3 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_score(self):
params = self.res1.params * 1.1
weights = self.res1.weights
sc1 = self.res1.model.score(params, weights)
sc2 = super(self.res1.model.__class__, self.res1.model).score(params,
weights)
assert_allclose(sc1, sc2, rtol=1e-6, atol=0)
assert_allclose(sc1, sc2, rtol=0, atol=1e-7)
# score at optimum
sc1 = self.res1.model.score(self.res1.params, weights)
assert_allclose(sc1, np.zeros(len(params)), rtol=0, atol=1e-8)
class TestGMMStOneiterOLS_Linear(CheckGMM):
@classmethod
def setup_class(self):
# replicating OLS by GMM - high agreement
self.params_tol = [1e-11, 1e-12]
self.bse_tol = [1e-13, 1e-13]
exog = exog_st # with const at end
res_ols = OLS(endog, exog).fit()
#Note: start is irrelevant but required
start = np.ones(len(res_ols.params))
nobs, k_instr = instrument.shape
w0inv = np.dot(exog.T, exog) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.LinearIVGMM(endog, exog, exog)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
#optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
optim_args={'disp': 0},
weights_method='iid',
wargs={'centered':False, 'ddof':'k_params'},
has_optimal_weights=True)
self.res1 = res
#from .results_gmm_griliches import results_onestep as results
#self.res2 = results
self.res2 = res_ols
#------------------
class TestGMMSt2(object):
# this looks like an old version, trying out different comparisons
    # of options with Stata
@classmethod
def setup_class(self):
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=2, inv_weights=w0inv,
wargs={'ddof':0, 'centered':False},
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_ivreg2_griliches import results_gmm2s_robust as results
self.res2 = results
# TODO: remove after testing, compare bse from 1 iteration
# see test_basic
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
wargs={'ddof':0, 'centered':False},
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res3 = res
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=5e-05, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=5e-06)
n = res1.model.exog.shape[0]
# TODO: check df correction np.sqrt(745./758 )*res1.bse matches better
dffact = np.sqrt(745. / 758 )
assert_allclose(res1.bse * dffact, res2.bse, rtol=5e-03, atol=0)
assert_allclose(res1.bse * dffact, res2.bse, rtol=0, atol=5e-03)
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=True,
weights=res1.weights))))
assert_allclose(res1.bse, res2.bse, rtol=5e-01, atol=0)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=True,
weights=res1.weights,
use_weights=True))))
assert_allclose(res1.bse, res2.bse, rtol=5e-02, atol=0)
# TODO: resolve this
# try bse from previous step, is closer to Stata
# guess: Stata ivreg2 doesn't calc for bse update after final iteration
# need better test case, bse difference is close to numerical optimization precision
assert_allclose(self.res3.bse, res2.bse, rtol=5e-05, atol=0)
assert_allclose(self.res3.bse, res2.bse, rtol=0, atol=5e-06)
# TODO; tvalues are not available yet, no inheritance
#assert_allclose(res1.tvalues, res2.tvalues, rtol=5e-10, atol=0)
class CheckIV2SLS(object):
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=1e-9, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=1e-10)
n = res1.model.exog.shape[0]
assert_allclose(res1.bse, res2.bse, rtol=1e-10, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=1e-11)
assert_allclose(res1.tvalues, res2.tvalues, rtol=5e-10, atol=0)
def test_other(self):
res1, res2 = self.res1, self.res2
assert_allclose(res1.rsquared, res2.r2, rtol=1e-7, atol=0)
assert_allclose(res1.rsquared_adj, res2.r2_a, rtol=1e-7, atol=0)
# TODO: why is fvalue different, IV2SLS uses inherited linear
assert_allclose(res1.fvalue, res2.F, rtol=1e-10, atol=0)
assert_allclose(res1.f_pvalue, res2.Fp, rtol=1e-8, atol=0)
assert_allclose(np.sqrt(res1.mse_resid), res2.rmse, rtol=1e-10, atol=0)
assert_allclose(res1.ssr, res2.rss, rtol=1e-10, atol=0)
assert_allclose(res1.uncentered_tss, res2.yy, rtol=1e-10, atol=0)
assert_allclose(res1.centered_tss, res2.yyc, rtol=1e-10, atol=0)
assert_allclose(res1.ess, res2.mss, rtol=1e-9, atol=0)
assert_equal(res1.df_model, res2.df_m)
assert_equal(res1.df_resid, res2.df_r)
# TODO: llf raise NotImplementedError
#assert_allclose(res1.llf, res2.ll, rtol=1e-10, atol=0)
def test_hypothesis(self):
res1, res2 = self.res1, self.res2
restriction = np.eye(len(res1.params))
res_t = res1.t_test(restriction)
assert_allclose(res_t.tvalue, res1.tvalues, rtol=1e-12, atol=0)
assert_allclose(res_t.pvalue, res1.pvalues, rtol=1e-12, atol=0)
res_f = res1.f_test(restriction[:-1]) # without constant
# TODO res1.fvalue problem, see issue #1104
assert_allclose(res_f.fvalue, res1.fvalue, rtol=1e-12, atol=0)
assert_allclose(res_f.pvalue, res1.f_pvalue, rtol=1e-12, atol=0)
assert_allclose(res_f.fvalue, res2.F, rtol=1e-10, atol=0)
assert_allclose(res_f.pvalue, res2.Fp, rtol=1e-08, atol=0)
def test_hausman(self):
res1, res2 = self.res1, self.res2
hausm = res1.spec_hausman()
# hausman uses se2 = ssr / nobs, no df correction
assert_allclose(hausm[0], res2.hausman['DWH'], rtol=1e-11, atol=0)
assert_allclose(hausm[1], res2.hausman['DWHp'], rtol=1e-10, atol=1e-25)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestIV2SLSSt1(CheckIV2SLS):
@classmethod
def setup_class(self):
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
mod = gmm.IV2SLS(endog, exog, instrument)
res = mod.fit()
self.res1 = res
from .results_ivreg2_griliches import results_small as results
self.res2 = results
| bsd-3-clause |
ishalyminov/memn2n | babi_plus_dialog_single_legacy.py | 1 | 7822 | """Example running MemN2N on a single bAbI task.
Download tasks from facebook.ai/babi """
from __future__ import absolute_import
from __future__ import print_function
import random
from itertools import chain
from six.moves import range, reduce
import logging
from sklearn import metrics
import tensorflow as tf
import numpy as np
from dialog_data_utils import (
vectorize_data_dialog,
get_candidates_list,
load_task,
vectorize_answers
)
from memn2n import MemN2N
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__file__)
tf.flags.DEFINE_float(
"learning_rate",
0.01,
"Learning rate for Adam Optimizer."
)
tf.flags.DEFINE_float("epsilon", 1e-8, "Epsilon value for Adam Optimizer.")
tf.flags.DEFINE_float("max_grad_norm", 40.0, "Clip gradients to this norm.")
tf.flags.DEFINE_integer(
"evaluation_interval",
1,
"Evaluate and print results every x epochs"
)
tf.flags.DEFINE_integer("batch_size", 8, "Batch size for training.")
tf.flags.DEFINE_integer("hops", 1, "Number of hops in the Memory Network.")
tf.flags.DEFINE_integer("epochs", 3, "Number of epochs to train for.")
tf.flags.DEFINE_integer(
"embedding_size",
128,
"Embedding size for embedding matrices."
)
tf.flags.DEFINE_integer("memory_size", 64, "Maximum size of memory.")
tf.flags.DEFINE_integer("task_id", 1, "bAbI task id, 1 <= id <= 6")
tf.flags.DEFINE_integer("random_state", 273, "Random state.")
tf.flags.DEFINE_string(
"data_dir",
"../babi_tools/babi_plus/",
"Directory containing bAbI tasks"
)
tf.flags.DEFINE_string(
"data_dir_plus",
"../babi_tools/babi_plus/",
"Directory containing bAbI+ tasks"
)
FLAGS = tf.flags.FLAGS
random.seed(FLAGS.random_state)
np.random.seed(FLAGS.random_state)
print("Started Task:", FLAGS.task_id)
# task data
train_babi, dev_babi, test_babi, test_oov_babi = load_task(FLAGS.data_dir, FLAGS.task_id)
train_plus, dev_plus, test_plus, test_oov_plus = load_task(FLAGS.data_dir_plus, FLAGS.task_id)
all_dialogues_babi = train_babi + dev_babi + test_babi + test_oov_babi
all_dialogues_babi_plus = train_plus + dev_plus + test_plus + test_oov_plus
data = reduce(
lambda x, y: x + y,
all_dialogues_babi + all_dialogues_babi_plus,
[]
)
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([len(s) for s, _, _ in data]))
sentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data))) + 2
query_size = max(map(len, (q for _, q, _ in data)))
memory_size = min(FLAGS.memory_size, max_story_size)
answer_candidates = get_candidates_list(FLAGS.data_dir)
vocab = reduce(
lambda x, y: x | y,
(set(list(chain.from_iterable(s)) + q + a) for s, q, a in data)
)
vocab |= reduce(
lambda x, y: x | y,
[set(answer.split()) for answer in answer_candidates]
)
vocab = sorted(vocab)
word_idx = {c: i + 1 for i, c in enumerate(vocab)}
answer_idx = {
candidate: i + 1
for i, candidate in enumerate(answer_candidates)
}
vocab_size = len(word_idx) + 1 # +1 for nil word
answer_vocab_size = len(answer_idx) + 1
sentence_size = max(query_size, sentence_size) # for the position
answers_vectorized = vectorize_answers(answer_candidates, word_idx, sentence_size)
print("Longest sentence length", sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
# in_train_sqa - trainset
# in_train_eval_sqa - trainset for evaluation (may be API calls only)
# in_test_sqa - testset for evaluation
def train_model(in_model, in_train_sqa, in_train_eval_sqa, in_test_sqa, in_batches):
best_train_accuracy, best_test_accuracy = 0.0, 0.0
for t in range(1, FLAGS.epochs+1):
s_train, q_train, a_train = in_train_sqa
s_train_eval, q_train_eval, a_train_eval = in_train_eval_sqa
s_test, q_test, a_test = in_test_sqa
train_labels = np.argmax(a_train, axis=1)
train_eval_labels = np.argmax(a_train_eval, axis=1)
test_labels = np.argmax(a_test, axis=1)
np.random.shuffle(in_batches)
total_cost = 0.0
for start, end in in_batches:
s = s_train[start:end]
q = q_train[start:end]
a = a_train[start:end]
# back-propagating each batch
cost_t = in_model.batch_fit(s, q, a)
total_cost += cost_t
if t % FLAGS.evaluation_interval == 0:
# evaluate on the whole trainset
train_preds = in_model.predict(s_train_eval, q_train_eval)
train_acc = metrics.accuracy_score(
train_preds,
train_eval_labels
)
# evaluating on the whole testset
test_preds = in_model.predict(s_test, q_test)
test_acc = metrics.accuracy_score(
test_preds,
test_labels
)
logger.info('-----------------------')
logger.info('Epoch:\t{}'.format(t))
logger.info('Total Cost:\t{}'.format(total_cost))
logger.info('Training Accuracy:\t{}'.format(train_acc))
logger.info('Testing Accuracy:\t{}'.format(test_acc))
logger.info('-----------------------')
if best_test_accuracy < test_acc:
best_train_accuracy, best_test_accuracy = train_acc, test_acc
return best_train_accuracy, best_test_accuracy
def main():
    # use lists (not lazy map objects) so that the len() calls below work under Python 3
    dialogues_train = list(train_babi)
    dialogues_train_eval = [[x[-1]] for x in train_babi]
    # testing only on API calls?
    dialogues_test = [[x[-1]] for x in test_plus]
data_train = reduce(lambda x, y: x + y, dialogues_train, [])
data_train_eval = reduce(lambda x, y: x + y, dialogues_train_eval, [])
data_test = reduce(lambda x, y: x + y, dialogues_test, [])
train_s, train_q, train_a = vectorize_data_dialog(
data_train,
word_idx,
answer_idx,
sentence_size,
memory_size
)
train_eval_s, train_eval_q, train_eval_a = vectorize_data_dialog(
data_train_eval,
word_idx,
answer_idx,
sentence_size,
memory_size
)
test_s, test_q, test_a = vectorize_data_dialog(
data_test,
word_idx,
answer_idx,
sentence_size,
memory_size
)
print("Training Size (dialogues)", len(dialogues_train))
print("Training/Evaluation Size (dialogues)", len(dialogues_train_eval))
print("Testing Size (dialogues)", len(dialogues_test))
print("Training Size (stories)", len(data_train))
print("Training/Evaluation Size (stories)", len(data_train_eval))
print("Testing Size (stories)", len(data_test))
tf.set_random_seed(FLAGS.random_state)
batch_size = FLAGS.batch_size
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=FLAGS.learning_rate
)
batches = zip(
range(0, len(data_train) - batch_size, batch_size),
range(batch_size, len(data_train), batch_size)
)
batches = [(start, end) for start, end in batches]
with tf.Session() as sess:
model = MemN2N(
batch_size,
vocab_size,
sentence_size,
memory_size,
FLAGS.embedding_size,
answers_vectorized,
session=sess,
hops=FLAGS.hops,
max_grad_norm=FLAGS.max_grad_norm,
optimizer=optimizer
)
best_accuracy_per_epoch = train_model(
model,
(train_s, train_q, train_a),
(train_eval_s, train_eval_q, train_eval_a),
(test_s, test_q, test_a),
batches
)
return best_accuracy_per_epoch
if __name__ == '__main__':
accuracies = main()
print ('train: {0:.3f}, test: {1:.3f}'.format(*accuracies))
| mit |
e-mission/e-mission-server | emission/analysis/intake/segmentation/trip_segmentation_methods/dwell_segmentation_time_filter.py | 2 | 16913 | from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from past.utils import old_div
import logging
import attrdict as ad
import numpy as np
import pandas as pd
import datetime as pydt
# Our imports
import emission.analysis.point_features as pf
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.core.wrapper.location as ecwl
import emission.analysis.intake.segmentation.restart_checking as eaisr
class DwellSegmentationTimeFilter(eaist.TripSegmentationMethod):
def __init__(self, time_threshold, point_threshold, distance_threshold):
"""
Determines segmentation points for points that were generated using a
time filter (i.e. report points every n seconds). This will *not* work for
points generated using a distance filter because it expects to have a
cluster of points to detect the trip end, and with a distance filter,
we will not get updates while we are still.
At least on android, we can get updates at a different frequency than
the "n" specified above. In particular:
a) we can get updates more frequently than "n" if there are other apps
that are requesting updates frequently - for example, while using a routing app.
b) we can get updates less frequently than "n" if there are bad/low
accuracy points that are filtered out.
So we use a combination of a time filter and a "number of points"
filter to detect the trip end.
The time_threshold indicates the number of seconds that we need to be
still before a trip end is detected.
        The point_threshold indicates the number of prior points (after
        filtering) over which we need to be still before a trip end is detected.
The distance_threshold indicates the radius of the circle used to
detect that we are still. If all the points within the
time_threshold AND all the points within the point_threshold are
within the distance_threshold of each other, then we are still.
"""
self.time_threshold = time_threshold
self.point_threshold = point_threshold
self.distance_threshold = distance_threshold
def segment_into_trips(self, timeseries, time_query):
"""
Examines the timeseries database for a specific range and returns the
segmentation points. Note that the input is the entire timeseries and
the time range. This allows algorithms to use whatever combination of
data that they want from the sensor streams in order to determine the
segmentation points.
"""
filtered_points_pre_ts_diff_df = timeseries.get_data_df("background/filtered_location", time_query)
# Sometimes, we can get bogus points because data.ts and
# metadata.write_ts are off by a lot. If we don't do this, we end up
# appearing to travel back in time
# https://github.com/e-mission/e-mission-server/issues/457
filtered_points_df = filtered_points_pre_ts_diff_df[(filtered_points_pre_ts_diff_df.metadata_write_ts - filtered_points_pre_ts_diff_df.ts) < 1000]
filtered_points_df.reset_index(inplace=True)
transition_df = timeseries.get_data_df("statemachine/transition", time_query)
if len(transition_df) > 0:
logging.debug("transition_df = %s" % transition_df[["fmt_time", "transition"]])
else:
logging.debug("no transitions found. This can happen for continuous sensing")
self.last_ts_processed = None
logging.info("Last ts processed = %s" % self.last_ts_processed)
segmentation_points = []
last_trip_end_point = None
curr_trip_start_point = None
just_ended = True
prevPoint = None
for idx, row in filtered_points_df.iterrows():
currPoint = ad.AttrDict(row)
currPoint.update({"idx": idx})
logging.debug("-" * 30 + str(currPoint.fmt_time) + "-" * 30)
if curr_trip_start_point is None:
logging.debug("Appending currPoint because the current start point is None")
# segmentation_points.append(currPoint)
if just_ended:
if self.continue_just_ended(idx, currPoint, filtered_points_df):
# We have "processed" the currPoint by deciding to glom it
self.last_ts_processed = currPoint.metadata_write_ts
continue
# else:
sel_point = currPoint
logging.debug("Setting new trip start point %s with idx %s" % (sel_point, sel_point.idx))
curr_trip_start_point = sel_point
just_ended = False
last5MinsPoints_df = filtered_points_df[np.logical_and(
np.logical_and(
filtered_points_df.ts > currPoint.ts - self.time_threshold,
filtered_points_df.ts < currPoint.ts),
filtered_points_df.ts >= curr_trip_start_point.ts)]
# Using .loc here causes problems if we have filtered out some points and so the index is non-consecutive.
# Using .iloc just ends up including points after this one.
# So we reset_index upstream and use it here.
# We are going to use the last 8 points for now.
            # TODO: Change this back to the last 10 points once the phone and server implementations are normalized
last10Points_df = filtered_points_df.iloc[max(idx-self.point_threshold, curr_trip_start_point.idx):idx+1]
distanceToLast = lambda row: pf.calDistance(ad.AttrDict(row), currPoint)
timeToLast = lambda row: currPoint.ts - ad.AttrDict(row).ts
last5MinsDistances = last5MinsPoints_df.apply(distanceToLast, axis=1)
logging.debug("last5MinsDistances = %s with length %d" % (last5MinsDistances.to_numpy(), len(last5MinsDistances)))
last10PointsDistances = last10Points_df.apply(distanceToLast, axis=1)
logging.debug("last10PointsDistances = %s with length %d, shape %s" % (last10PointsDistances.to_numpy(),
len(last10PointsDistances),
last10PointsDistances.shape))
# Fix for https://github.com/e-mission/e-mission-server/issues/348
last5MinTimes = last5MinsPoints_df.apply(timeToLast, axis=1)
logging.debug("len(last10PointsDistances) = %d, len(last5MinsDistances) = %d" %
(len(last10PointsDistances), len(last5MinsDistances)))
logging.debug("last5MinsTimes.max() = %s, time_threshold = %s" %
(last5MinTimes.max() if len(last5MinTimes) > 0 else np.NaN, self.time_threshold))
if self.has_trip_ended(prevPoint, currPoint, timeseries, last10PointsDistances, last5MinsDistances, last5MinTimes):
(ended_before_this, last_trip_end_point) = self.get_last_trip_end_point(filtered_points_df,
last10Points_df, last5MinsPoints_df)
segmentation_points.append((curr_trip_start_point, last_trip_end_point))
logging.info("Found trip end at %s" % last_trip_end_point.fmt_time)
# We have processed everything up to the trip end by marking it as a completed trip
self.last_ts_processed = currPoint.metadata_write_ts
if ended_before_this:
# in this case, we end a trip at the previous point, and the next trip starts at this
# point, not the next one
just_ended = False
prevPoint = currPoint
curr_trip_start_point = currPoint
logging.debug("Setting new trip start point %s with idx %s" %
(currPoint, currPoint.idx))
else:
# We end a trip at the current point, and the next trip starts at the next point
just_ended = True
prevPoint = None
else:
prevPoint = currPoint
logging.debug("Iterated over all points, just_ended = %s, len(transition_df) = %s" %
(just_ended, len(transition_df)))
if not just_ended and len(transition_df) > 0:
stopped_moving_after_last = transition_df[(transition_df.ts > currPoint.ts) & (transition_df.transition == 2)]
logging.debug("looking after %s, found transitions %s" %
(currPoint.ts, stopped_moving_after_last))
if len(stopped_moving_after_last) > 0:
(unused, last_trip_end_point) = self.get_last_trip_end_point(filtered_points_df,
last10Points_df, None)
segmentation_points.append((curr_trip_start_point, last_trip_end_point))
logging.debug("Found trip end at %s" % last_trip_end_point.fmt_time)
# We have processed everything up to the trip end by marking it as a completed trip
self.last_ts_processed = currPoint.metadata_write_ts
return segmentation_points
def continue_just_ended(self, idx, currPoint, filtered_points_df):
"""
Normally, since the logic here and the
logic on the phone are the same, if we have detected a trip
end, any points after this are part of the new trip.
However, in some circumstances, notably in my data from 27th
August, there appears to be a mismatch and we get a couple of
points past the end that we detected here. So let's look for
        points that are within the distance filter and within a minute of
        the previous point, and join them to the just-ended trip instead of
        using them to start the new trip.
:param idx: Index of the current point
:param currPoint: current point
:param filtered_points_df: dataframe of filtered points
:return: True if we should continue the just ended trip, False otherwise
"""
if idx == 0:
return False
else:
prev_point = ad.AttrDict(filtered_points_df.iloc[idx - 1])
logging.debug("Comparing with prev_point = %s" % prev_point)
if pf.calDistance(prev_point, currPoint) < self.distance_threshold and \
currPoint.ts - prev_point.ts <= 60:
logging.info("Points %s and %s are within the distance filter and only 1 min apart so part of the same trip" %
(prev_point, currPoint))
return True
else:
return False
def has_trip_ended(self, prev_point, curr_point, timeseries, last10PointsDistances, last5MinsDistances, last5MinTimes):
# Another mismatch between phone and server. Phone stops tracking too soon,
# so the distance is still greater than the threshold at the end of the trip.
# But then the next point is a long time away, so we can split again (similar to a distance filter)
if prev_point is None:
logging.debug("prev_point is None, continuing trip")
else:
timeDelta = curr_point.ts - prev_point.ts
distDelta = pf.calDistance(prev_point, curr_point)
if timeDelta > 0:
speedDelta = old_div(distDelta, timeDelta)
else:
speedDelta = np.nan
speedThreshold = old_div(float(self.distance_threshold), self.time_threshold)
if eaisr.is_tracking_restarted_in_range(prev_point.ts, curr_point.ts, timeseries):
logging.debug("tracking was restarted, ending trip")
return True
ongoing_motion_check = len(eaisr.get_ongoing_motion_in_range(prev_point.ts, curr_point.ts, timeseries)) > 0
if timeDelta > 2 * self.time_threshold and not ongoing_motion_check:
logging.debug("lastPoint.ts = %s, currPoint.ts = %s, threshold = %s, large gap = %s, ongoing_motion_in_range = %s, ending trip" %
(prev_point.ts, curr_point.ts,self.time_threshold, curr_point.ts - prev_point.ts, ongoing_motion_check))
return True
# http://www.huffingtonpost.com/hoppercom/the-worlds-20-longest-non-stop-flights_b_5994268.html
# Longest flight is 17 hours, which is the longest you can go without cell reception
# And even if you split an air flight that long into two, you will get some untracked time in the
# middle, so that's good.
TWELVE_HOURS = 12 * 60 * 60
if timeDelta > TWELVE_HOURS:
logging.debug("prev_point.ts = %s, curr_point.ts = %s, TWELVE_HOURS = %s, large gap = %s, ending trip" %
(prev_point.ts, curr_point.ts, TWELVE_HOURS, curr_point.ts - prev_point.ts))
return True
if (timeDelta > 2 * self.time_threshold and # We have been here for a while
speedDelta < speedThreshold): # we haven't moved very much
logging.debug("prev_point.ts = %s, curr_point.ts = %s, threshold = %s, large gap = %s, ending trip" %
(prev_point.ts, curr_point.ts,self.time_threshold, curr_point.ts - prev_point.ts))
return True
else:
logging.debug("prev_point.ts = %s, curr_point.ts = %s, time gap = %s (vs %s), distance_gap = %s (vs %s), speed_gap = %s (vs %s) continuing trip" %
(prev_point.ts, curr_point.ts,
timeDelta, self.time_threshold,
distDelta, self.distance_threshold,
speedDelta, speedThreshold))
# The -30 is a fuzz factor intended to compensate for older clients
# where data collection stopped after 5 mins, so that we never actually
# see 5 mins of data
if (len(last10PointsDistances) < self.point_threshold - 1 or
len(last5MinsDistances) == 0 or
last5MinTimes.max() < self.time_threshold - 30):
logging.debug("Too few points to make a decision, continuing")
return False
# Normal end-of-trip case
logging.debug("last5MinsDistances.max() = %s, last10PointsDistance.max() = %s" %
(last5MinsDistances.max(), last10PointsDistances.max()))
if (last5MinsDistances.max() < self.distance_threshold and
last10PointsDistances.max() < self.distance_threshold):
return True
def get_last_trip_end_point(self, filtered_points_df, last10Points_df, last5MinsPoints_df):
ended_before_this = last5MinsPoints_df is None or len(last5MinsPoints_df) == 0
if ended_before_this:
logging.debug("trip end transition, so last 10 points are %s" % last10Points_df.index)
last10PointsMedian = np.median(last10Points_df.index)
last_trip_end_index = int(last10PointsMedian)
logging.debug("last5MinsPoints not found, last_trip_end_index = %s" % last_trip_end_index)
else:
last10PointsMedian = np.median(last10Points_df.index)
last5MinsPointsMedian = np.median(last5MinsPoints_df.index)
last_trip_end_index = int(min(last5MinsPointsMedian, last10PointsMedian))
logging.debug("last5MinsPoints and last10PointsMedian found, last_trip_end_index = %s" % last_trip_end_index)
# logging.debug("last5MinPoints.median = %s (%s), last10Points_df = %s (%s), sel index = %s" %
# (np.median(last5MinsPoints_df.index), last5MinsPoints_df.index,
# np.median(last10Points_df.index), last10Points_df.index,
# last_trip_end_index))
last_trip_end_point_row = filtered_points_df.iloc[last_trip_end_index]
last_trip_end_point = ad.AttrDict(filtered_points_df.iloc[last_trip_end_index])
logging.debug("Appending last_trip_end_point %s with index %s " %
(last_trip_end_point, last_trip_end_point_row.name))
return (ended_before_this, last_trip_end_point)
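# Hedged sketch (an illustration, not part of the class above): the core
# "stillness" test behind has_trip_ended -- a trip end is declared when every
# recent point lies within distance_threshold of the current point, both over
# the last time_threshold seconds and over the last point_threshold points.
# The distances and the threshold below are assumptions for illustration only.
def _illustrate_stillness_check(last_5min_distances, last_10pt_distances,
                                distance_threshold=100):
    import numpy as np
    d5 = np.asarray(last_5min_distances)
    d10 = np.asarray(last_10pt_distances)
    if len(d5) == 0 or len(d10) == 0:
        return False  # too little data to decide, keep the trip going
    return bool(d5.max() < distance_threshold and
                d10.max() < distance_threshold)
# _illustrate_stillness_check([12, 30, 45], [8, 12, 30, 45]) -> True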
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/offsetbox.py | 4 | 55635 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to its parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of their
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.path as mpath
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset positions of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
    *wd_list* : list of (width, xdescent) of boxes to be packed.
    *total* : intended total length. None if not used.
    *sep* : spacing between boxes.
    *mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = list(zip(*wd_list))
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, ydescent) of each box, align the boxes
    with *align* and calculate the y-offset of each box.
    *hd_list* : list of (height, ydescent) of boxes to be aligned.
    *height* : intended total height. None if not used.
    *align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
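# Hedged illustration (not part of matplotlib): center-aligning two boxes of
# heights 10 and 6 with no descent. The numbers are assumptions.
def _demo_get_aligned_offsets():
    hd_list = [(10, 0), (6, 0)]  # (height, ydescent) pairs
    height, descent, offsets = _get_aligned_offsets(hd_list, None,
                                                    align="center")
    # height == 10, descent == 0.0, offsets == [0.0, 2.0]
    return height, descent, offsets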
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. The child artist are meant
to be drawn at a relative position to its parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
        # Clipping has not been implemented in the OffsetBox family, so
# disable the clip flag for consistency. It can always be turned back
# on to zero effect.
self.set_clip_on(False)
self._children = []
self._offset = (0, 0)
def __getstate__(self):
state = martist.Artist.__getstate__(self)
# pickle cannot save instancemethods, so handle them here
from .cbook import _InstanceMethodPickler
import inspect
offset = state['_offset']
if inspect.ismethod(offset):
state['_offset'] = _InstanceMethodPickler(offset)
return state
def __setstate__(self, state):
self.__dict__ = state
from .cbook import _InstanceMethodPickler
if isinstance(self._offset, _InstanceMethodPickler):
self._offset = self._offset.get_instancemethod()
self.stale = True
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
@martist.Artist.axes.setter
def axes(self, ax):
# TODO deal with this better
martist.Artist.axes.fset(self, ax)
for c in self.get_children():
if c is not None:
c.axes = ax
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
self.stale = True
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
Get the offset
accepts extent of the box
"""
if six.callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
self.stale = True
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
self.stale = True
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes. Can be one of ``top``, ``bottom``,
``left``, ``right``, ``center`` and ``baseline``
mode : str, optional
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled with
the renderer dpi, while *width* and *height* need to be in
pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes.
mode : str, optional
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled with
the renderer dpi, while *width* and *height* need to be in
pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w, h, xd, yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str
Alignment of boxes.
mode : str
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled with
the renderer dpi, while *width* and *height* need to be in
pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
if not whd_list:
return 2 * pad, 2 * pad, pad, pad, []
if self.height is None:
height_descent = max([h - yd for w, h, xd, yd in whd_list])
ydescent = max([yd for w, h, xd, yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2 * pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
xdescent = whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
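# Hedged usage sketch (an assumption, not part of this module): packing two
# TextAreas side by side with an HPacker and anchoring the packed box in the
# upper-left corner of an axes. Texts, colors and location are illustrative.
def _demo_hpacker(ax):
    left = TextArea("left", textprops=dict(color="k"))
    right = TextArea("right", textprops=dict(color="r"))
    packed = HPacker(children=[left, right], align="baseline", pad=0, sep=5)
    anchored = AnchoredOffsetbox(loc=2, child=packed, pad=0.,
                                 frameon=False, borderpad=0.)
    ax.add_artist(anchored)
    return anchored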
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
          *pad* needs to be given in points and will be
          scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, # self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2 * pad, h + 2 * pad, \
xd + pad, yd + pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
self.stale = True
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed. The children can be clipped at the
boundaries of the parent.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=False):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
*clip* : Whether to clip the children
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self._clip_children = clip
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
@property
def clip_children(self):
"""
        Whether the children of this DrawingArea should be clipped
        by the DrawingArea bounding box.
"""
return self._clip_children
@clip_children.setter
def clip_children(self, val):
self._clip_children = bool(val)
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width * dpi_cor, self.height * dpi_cor, \
self.xdescent * dpi_cor, self.ydescent * dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
if self.axes is not None:
a.axes = self.axes
fig = self.figure
if fig is not None:
a.set_figure(fig)
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
# At this point the DrawingArea has a transform
# to the display space so the path created is
# good for clipping children
tpath = mtransforms.TransformedPath(
mpath.Path([[0, 0], [0, self.height],
[self.width, self.height],
[self.width, 0]]),
self.get_transform())
for c in self._children:
if self._clip_children and not (c.clipbox or c._clippath):
c.set_clip_path(tpath)
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
Parameters
----------
s : str
a string to be displayed.
textprops : `~matplotlib.font_manager.FontProperties`, optional
multilinebaseline : bool, optional
If `True`, baseline for multiline text is adjusted so that
            it is (approximately) center-aligned with single-line
text.
minimumdescent : bool, optional
If `True`, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"] = "baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform +
self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"Set the text of this area as a string."
self._text.set_text(s)
self.stale = True
def get_text(self):
"Returns the string representation of this area's text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
        Set multilinebaseline.
        If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        single-line text.
"""
self._multilinebaseline = t
self.stale = True
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
self.stale = True
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info, d = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h - d)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class AuxTransformBox(OffsetBox):
"""
    Offset box with an aux_transform. Its children will be transformed
    with the aux_transform first and then offset. The absolute coordinate
    of the aux_transform has no meaning, as it will be automatically
    adjusted so that the lower-left corner of the bounding box of the
    children is set to (0, 0) before the offset transform.
    It is similar to DrawingArea, except that the extent of the box is
    not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinates.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
        # ref_offset_transform is used to make the offset_transform
        # always reference the lower-left corner of the bbox of its
        # children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
        # adjust ref_offset_transform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use another OffsetBox class to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
pad : pad around the child for drawing a frame. given in
fraction of fontsize.
borderpad : pad between offsetbox frame and the bbox_to_anchor,
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
if child is not None:
child.axes = self.axes
self.stale = True
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
return the extent of the artist. The extent of the child
added with the pad is returned
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
def get_bbox_to_anchor(self):
"""
return the bbox that the legend will be anchored
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
set the bbox that the child will be anchored.
*bbox* can be a Bbox instance, a list of [left, bottom, width,
height], or a list of [left, bottom] where the width and
height will be assumed to be zero. The bbox will be
transformed to display coordinate by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
self.stale = True
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox - xd, oy - yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(
self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad * fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0 + xd, y0 + yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible():
return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
self.stale = False
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = list(xrange(11))
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
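# Hedged usage sketch (an assumption, not part of this module): anchoring a
# small DrawingArea at an explicit bbox_to_anchor given in axes coordinates.
# The sizes, colors and anchor point are illustrative only.
def _demo_anchored_offsetbox(ax):
    from matplotlib.patches import Circle
    area = DrawingArea(20, 20, 0, 0)
    area.add_artist(Circle((10, 10), 10, fc="r"))
    anchored = AnchoredOffsetbox(loc=3, child=area, pad=0.4, borderpad=0.5,
                                 frameon=True,
                                 bbox_to_anchor=(0.2, 0.5),
                                 bbox_transform=ax.transAxes)
    ax.add_artist(anchored)
    return anchored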
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text.
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
Parameters
----------
s : string
Text.
loc : str
Location code.
pad : float, optional
Pad between the text and the frame as fraction of the font
size.
borderpad : float, optional
Pad between the frame and the axes (or *bbox_to_anchor*).
prop : `matplotlib.font_manager.FontProperties`
Font properties.
Notes
-----
Other keyword parameters of `AnchoredOffsetbox` are also
allowed.
"""
if prop is None:
prop = {}
propkeys = list(six.iterkeys(prop))
badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
if set(badkwargs) & set(propkeys):
warnings.warn("Mixing horizontalalignment or verticalalignment "
"with AnchoredText is not supported.")
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
OffsetBox.__init__(self)
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
self.stale = True
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
self.stale = True
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
    #         Accepts a tuple of (x, y) coordinates in display units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# FIXME dpi_cor is never used
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = nx * zoom, ny * zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
# bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
*box_alignment* : a tuple of two floats for a vertical and
horizontal alignment of the offset box w.r.t. the *boxcoords*.
        The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
other parameters are identical to that of Annotation.
"""
martist.Artist.__init__(self, **kwargs)
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if xybox is None:
self.xybox = xy
else:
self.xybox = xybox
if boxcoords is None:
self.boxcoords = xycoords
else:
self.boxcoords = boxcoords
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
@property
def xyann(self):
return self.xybox
@xyann.setter
def xyann(self, xyann):
self.xybox = xyann
self.stale = True
@property
def anncoords(self):
return self.boxcoords
@anncoords.setter
def anncoords(self, coords):
self.boxcoords = coords
self.stale = True
def contains(self, event):
t, tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t, tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop = FontProperties(size=s)
self.stale = True
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"""
Update the pixel positions of the annotated point and the text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"""
Update the pixel positions of the annotation text and the arrow
patch.
"""
x, y = self.xybox
if isinstance(self.boxcoords, tuple):
xcoord, ycoord = self.boxcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shrinkA and shrinkB
            # (in points). If patchA is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
self.stale = False
class DraggableBase(object):
"""
helper code for a draggable artist (legend, offsetbox)
    The derived class must override the following two methods.
        def save_offset(self):
            pass
        def update_offset(self, dx, dy):
            pass
    *save_offset* is called when the object is picked for dragging and it is
    meant to save the reference position of the artist.
    *update_offset* is called during the dragging. dx and dy are the pixel
    offsets from the point where the mouse drag started.
    Optionally you may override the following two methods.
        def artist_picker(self, artist, evt):
            return self.ref_artist.contains(evt)
        def finalize_offset(self):
            pass
    *artist_picker* is a picker method that will be
    used. *finalize_offset* is called when the mouse is released. In
    the current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* places the artists simply in display
    coordinates, and *finalize_offset* recalculates their position in
    normalized axes coordinates and sets a relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self.canvas = self.ref_artist.figure.canvas
self._use_blit = use_blit and self.canvas.supports_blit
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(
self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
"""disconnect the callbacks"""
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
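# Hedged sketch (an assumption, not part of this module): a minimal
# DraggableBase subclass that drags a Text artist already attached to a
# figure, following the save_offset/update_offset contract described above.
class _DraggableTextDemo(DraggableBase):
    def __init__(self, text_artist, use_blit=False):
        DraggableBase.__init__(self, text_artist, use_blit=use_blit)
        self.text_artist = text_artist
    def save_offset(self):
        # remember where the text is at pick time, in display (pixel) coords
        trans = self.text_artist.get_transform()
        self._x0, self._y0 = trans.transform_point(
            self.text_artist.get_position())
    def update_offset(self, dx, dy):
        # dx, dy are pixel offsets from the drag start; map back to data coords
        inv = self.text_artist.get_transform().inverted()
        self.text_artist.set_position(
            inv.transform_point((self._x0 + dx, self._y0 + dy)))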
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox - xd, oy - yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
x, y = ann.xyann
if isinstance(ann.anncoords, tuple):
xcoord, ycoord = ann.anncoords
x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.anncoords)
self.ox, self.oy = ox0, oy0
self.annotation.anncoords = "figure pixels"
self.update_offset(0, 0)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xyann = self.ox + dx, self.oy + dy
x, y = ann.xyann
def finalize_offset(self):
loc_in_canvas = self.annotation.xyann
self.annotation.anncoords = "axes fraction"
pos_axes_fraction = self.annotation.axes.transAxes.inverted()
pos_axes_fraction = pos_axes_fraction.transform_point(loc_in_canvas)
self.annotation.xyann = tuple(pos_axes_fraction)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16, 16) / 256.
myimage = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
plt.draw()
plt.show()
| mit |
jseabold/statsmodels | statsmodels/sandbox/distributions/otherdist.py | 5 | 10101 | '''Parametric Mixture Distributions
Created on Sat Jun 04 2011
Author: Josef Perktold
Notes:
Compound Poisson has mass point at zero
https://en.wikipedia.org/wiki/Compound_Poisson_distribution
and would need special treatment
need a distribution that has discrete mass points and continuous range, e.g.
compound Poisson, Tweedie (for some parameter range),
pdf of Tobit model (?) - truncation with clipping
Question: Metaclasses and class factories for generating new distributions from
existing distributions by transformation, mixing, compounding
'''
import numpy as np
from scipy import stats
class ParametricMixtureD(object):
'''mixtures with a discrete distribution
The mixing distribution is a discrete distribution like scipy.stats.poisson.
    All distributions in the mixture are of the same type, are parametrized
    by the outcome of the mixing distribution, and have to be continuous
    distributions (or have a pdf method).
    As an example, a mixture of normally distributed random variables with
Poisson as the mixing distribution.
assumes vectorized shape, loc and scale as in scipy.stats.distributions
assume mixing_dist is frozen
initialization looks fragile for all possible cases of lower and upper
bounds of the distributions.
'''
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parametrized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
lower = mixing_dist.ppf(1e-4)
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
upper = mixing_dist.isf(1e-4)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support)
def rvs(self, size=1):
mrvs = self.mixing_dist.rvs(size)
        #TODO: check strange cases ? this assumes consecutive integer support
mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)
bd_args = tuple(md[mrvs_idx] for md in self.bd_args)
bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)
kwds = {'size':size}
kwds.update(bd_kwds)
        # use the args selected by the realized mixing outcomes, not the full support grid
        rvs = self.base_dist.rvs(*bd_args, **kwds)
return rvs, mrvs_idx
def pdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
def cdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
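# Example sketch of the expected callback signatures (hypothetical choices,
# mirroring the __main__ block below): bd_args_func and bd_kwds_func receive
# the truncated support of the mixing distribution and return per-outcome
# parameters for the base distribution.
def _example_parametric_mixture():
    mixing = stats.poisson(2.)
    pm = ParametricMixtureD(mixing, stats.norm,
                            bd_args_func=lambda support: (),
                            bd_kwds_func=lambda support: {'loc': support,
                                                          'scale': 0.1 * np.ones_like(support)})
    prob, component_probs = pm.pdf(np.linspace(0, 10, 11))
    return prob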
#try:
class ClippedContinuous(object):
    '''clipped continuous distribution with a mass point at clip_lower
Notes
-----
first version, to try out possible designs
insufficient checks for valid arguments and not clear
whether it works for distributions that have compact support
clip_lower is fixed and independent of the distribution parameters.
The clip_lower point in the pdf has to be interpreted as a mass point,
i.e. different treatment in integration and expect function, which means
none of the generic methods for this can be used.
maybe this will be better designed as a mixture between a degenerate or
discrete and a continuous distribution
    Warning: uses equality to check for clip_lower values in function
    arguments; since these are floating-point numbers, the comparison might
    fail if clip_lower values are not exactly equal.
We could add a check whether the values are in a small neighborhood, but
it would be expensive (need to search and check all values).
'''
def __init__(self, base_dist, clip_lower):
self.base_dist = base_dist
self.clip_lower = clip_lower
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if 'clip_lower' not in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds
def rvs(self, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
rvs_ = self.base_dist.rvs(*args, **kwds)
#same as numpy.clip ?
rvs_[rvs_ < clip_lower] = clip_lower
return rvs_
def pdf(self, x, *args, **kwds):
x = np.atleast_1d(x)
if 'clip_lower' not in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))
        clip_mask = (x == clip_lower)
if np.any(clip_mask):
clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
pdf_raw[x < clip_lower] = 0
return pdf_raw
def cdf(self, x, *args, **kwds):
if 'clip_lower' not in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
cdf_raw = self.base_dist.cdf(x, *args, **kwds)
#not needed if equality test is used
## clip_mask = (x == self.clip_lower)
## if np.any(clip_mask):
## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
## pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
#if self.a is defined
cdf_raw[x < clip_lower] = 0
return cdf_raw
def sf(self, x, *args, **kwds):
if 'clip_lower' not in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
sf_raw = self.base_dist.sf(x, *args, **kwds)
sf_raw[x <= clip_lower] = 1
return sf_raw
def ppf(self, x, *args, **kwds):
raise NotImplementedError
def plot(self, x, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
mass = self.pdf(clip_lower, *args, **kwds)
xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))
import matplotlib.pyplot as plt
#x = np.linspace(-4, 4, 21)
#plt.figure()
plt.xlim(clip_lower-0.1, x.max())
#remove duplicate calculation
xpdf = self.pdf(x, *args, **kwds)
plt.ylim(0, max(mass, xpdf.max())*1.1)
plt.plot(xr, self.pdf(xr, *args, **kwds))
#plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))
plt.stem([clip_lower], [mass],
linefmt='b-', markerfmt='bo', basefmt='r-')
return
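# Example sketch (assumed values): a standard normal clipped at zero gives a
# Tobit-style variable with a point mass P(X <= 0) = 0.5 at the clip value.
def _example_clipped_normal():
    cn = ClippedContinuous(stats.norm, clip_lower=0.)
    densities = cn.pdf(np.array([0., 0.5, 1.0]))  # first entry is the mass at 0
    draws = cn.rvs(loc=1., size=5)                # draws below 0 are set to 0
    return densities, draws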
if __name__ == '__main__':
doplots = 1
#*********** Poisson-Normal Mixture
mdist = stats.poisson(2.)
bdist = stats.norm
bd_args_fn = lambda x: ()
#bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}
bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}
pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)
print(pd.pdf(1))
p, bp = pd.pdf(np.linspace(0,20,21))
pc, bpc = pd.cdf(np.linspace(0,20,21))
print(pd.rvs())
rvs, m = pd.rvs(size=1000)
if doplots:
import matplotlib.pyplot as plt
plt.hist(rvs, bins = 100)
plt.title('poisson mixture of normal distributions')
#********** clipped normal distribution (Tobit)
bdist = stats.norm
clip_lower_ = 0. #-0.5
cnorm = ClippedContinuous(bdist, clip_lower_)
x = np.linspace(1e-8, 4, 11)
print(cnorm.pdf(x))
print(cnorm.cdf(x))
if doplots:
#plt.figure()
#cnorm.plot(x)
plt.figure()
cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))
plt.title('clipped normal distribution')
fig = plt.figure()
for i, loc in enumerate([0., 0.5, 1.,2.]):
fig.add_subplot(2,2,i+1)
cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))
plt.title('clipped normal, loc = %3.2f' % loc)
loc = 1.5
rvs = cnorm.rvs(loc=loc, size=2000)
plt.figure()
plt.hist(rvs, bins=50)
plt.title('clipped normal rvs, loc = %3.2f' % loc)
#plt.show()
| bsd-3-clause |
Cronjaeger/coalescent-simulations | finiteSitesModell_investigations.py | 1 | 43035 | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 22:17:25 2015
@author: mathias
"""
import libCoal
import numpy as np
import matplotlib.pyplot as pl
from scipy.special import binom
from copy import deepcopy
from time import ctime
from math import log10
import networkx as nx
class coalescent_finiteSites(libCoal.coalescent):
'''
functionally identical to libCoal.coalescent. Only practical difference
being that the site frequency spectrum is not computed.
'''
def computeSFS(self):
pass
class simulator_KingmanFiniteSites(libCoal.simulateKingman):
def __init__(self,n,mutationRate,L,compute_ancestral_configuration_on_initialization = True,*args):
'''
n = number of individuals
mutationRate = Mutation rate of the coalescent.
T_Max = Time-horizon of the coalescent.
'''
self.n = n
self.mutationRate = mutationRate
# self.mergerRate = mergerRate
self.T_max = float('inf')
self.T_MRCA = float('inf')
self.args = args
self.SFS = np.zeros(n)
self.coal = coalescent_finiteSites(libCoal.partition([[i] for i in range(n)]))
# self.simulateCoalescent(self.args)
self.L = L
self.MRCA_seq = np.zeros(self.L,dtype=int)
self.site_mutationCount = np.zeros(self.L,dtype=int)
#self.mutations_with_site_and_type = [] # lists of tuples of the form [(time,lineage,site,k)]
self.sequences_mutationCount = np.zeros(self.n, dtype=int)
self.sequences = np.zeros((self.n,self.L),dtype=int)
self.compute_ancestral_configuration_on_initialization = compute_ancestral_configuration_on_initialization
self.simulateCoalescent()
# def preSimulationSteps(self):
# self.L = self.args[0]
# self.MRCA_seq = np.zeros(self.L,dtype=int)
# self.site_mutationCount = np.zeros(self.L,dtype=int)
# self.sequences_mutationCount = np.zeros(self.n, dtype=int)
# self.sequences = np.zeros((self.n,self.L),dtype=int)
# def getMutations_withSitesAndShifts(self):
# return deepcopy(self.mutations_with_site_and_type)
def postSimulationSteps(self):
if self.compute_ancestral_configuration_on_initialization:
self.computeAncestralConfiguration()
def computeAncestralConfiguration(self):
affectedSites = np.random.randint(self.L, size=len(self.coal.mutations))
mutationType = np.random.randint(1,4,size = len(self.coal.mutations))
self.coal.mutations = [self.coal.mutations[i] + (affectedSites[i], mutationType[i],i) for i in xrange(len(self.coal.mutations))]
for i in range(len(self.coal.mutations)):
j = affectedSites[i]
k = mutationType[i]
self.site_mutationCount[j] += 1
t, branch = self.coal.mutations[i][:2]
affectedSequences = self.coal.getStateNoCoppy(t).blocks[branch]
for sequenceIndex in affectedSequences:
self.sequences[sequenceIndex,j] += k
self.sequences[sequenceIndex,j] %= 4
self.sequences_mutationCount[sequenceIndex] += 1
def untillFirstXInconsistencies(self,computeS_seq = False,X = 2):
"""
Step 1, set up everything like we were running
computeAncestralConfiguration
"""
M = len(self.coal.mutations)
S = np.zeros((self.n,self.L),dtype=int)
S_old = np.array(S)
site_mut_count = np.zeros(self.L,dtype=int)
seq_mut_count = np.zeros(self.n, dtype=int)
# affectedSites = np.random.randint(self.L, size = M)
# mutationType = np.random.randint(1,4,size = M)
'''
        Step 2: randomly iterate over the list of mutations, until the number
        of added mutations exceeds the number of segregating sites by X.
'''
inconsistencyCount = 0
mutationCounter = 0
ignoredMutations = list(self.coal.mutations)
consideredMutations = []
deviant_mutations = []
# deviant_mutations_indices = []
# self.coal.mutations = []
typeCount = [0,0,0,0]
typeCountList = [list(typeCount)]
S_seq = [np.matrix(S)]
# inconsistentPairs = []
"""
type 0 : a column with 3 states
type 1 : a column with 2 states and >2 mutations
: (no incompatibility w. other states)
type 2 : creating a column with 2 states and incompatibilities
type 3 : create an invisible state
"""
while inconsistencyCount < X and mutationCounter < M:
m_index = np.random.randint(M - mutationCounter)
m_k = np.random.randint(1,4)
m_site = np.random.randint(self.L)
site_mut_count[m_site] += 1
mutation = ignoredMutations.pop(m_index) + (m_site, m_k,mutationCounter)
"""
            Now a mutation has the following as its entries:
            m[0] : time of mutation
            m[1] : affected lineage
            m[2] : affected site
            m[3] : mutation type (k - shift mod 4)
            m[4] : a counter -- number of mutations preceding this one
"""
t,branch = mutation[:2]
affectedSequences = self.coal.getStateNoCoppy(t).blocks[branch]
for sequenceIndex in affectedSequences:
S[sequenceIndex,m_site] += m_k
S[sequenceIndex,m_site] %= 4
seq_mut_count[sequenceIndex] += 1
# if site_mut_count[m_site] > 1 and S[sequenceIndex,m_site] != 0:
if site_mut_count[m_site] > 1:
inconsistencyCount += 1
mutation += (inconsistencyCount,)
# inconsistent_columns_new = inconsistentColumnPairs(site_mut_count,S)
# inconsistent_columns_old = inconsistentColumnPairs([ i - int(i==m_site) for i in site_mut_count],S_old)
inconsistent_columns_new = inconsistentColumnPairs(S)
inconsistent_columns_old = inconsistentColumnPairs(S_old)
newInconsistencies = len(inconsistent_columns_new) > len(inconsistent_columns_old)
if len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) > 1:
typeCount[0] += 1
elif len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) == 1:
if newInconsistencies:
typeCount[2] += 1
else:
typeCount[1] += 1
#if S[sequenceIndex,m_site] == 0:
elif len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) == 0:
typeCount[3] += 1
else:
                    raise ValueError("A mutation which defies categorization has occurred. This should not happen!")
typeCountList.append(list(typeCount))
deviant_mutations.append(mutation)
# deviant_mutations_indices.append(mutationCounter)
mutationCounter += 1
consideredMutations.append(mutation)
if computeS_seq: S_seq.append(np.matrix(S))
S_old[:,m_site] = S[:,m_site]
if computeS_seq:
newSeq = zip(consideredMutations,S_seq[1:])
newSeq.sort(cmp = lambda x,y: int(np.sign(x[0][0] - y[0][0])))
consideredMutations = [x[0] for x in newSeq]
S_seq = S_seq[:1] + [x[1] for x in newSeq]
else:
consideredMutations.sort(cmp = lambda x,y: int(np.sign(x[0] - y[0])))
newSeq = []
self.coal.mutations = consideredMutations
return {"S":S,
"mutCount_sites":site_mut_count,
"mutCount_sequences":seq_mut_count,
"Inconsistencies":inconsistencyCount,
"typeCount":typeCount,
"typeCount_arr":np.array(typeCount),
"coalescent" : deepcopy(self.coal),
"typeCountList" : typeCountList,
"newSeq" : newSeq,
"S_seq" : S_seq,
"deviant_mutations": deviant_mutations}
def until_k_visible_mutations(self,k = 10):
"""
Step 1, set up everything like we were running
computeAncestralConfiguration
"""
        computeS_seq = False
M = len(self.coal.mutations)
S = np.zeros((self.n,self.L),dtype=int)
S_old = np.array(S)
site_mut_count = np.zeros(self.L,dtype=int)
seq_mut_count = np.zeros(self.n, dtype=int)
# affectedSites = np.random.randint(self.L, size = M)
# mutationType = np.random.randint(1,4,size = M)
'''
Step 2, randomly iterate over the list of mutations,
        until enough mutations have occurred.
'''
inconsistencyCount = 0
mutationCounter = 0
ignoredMutations = list(self.coal.mutations)
consideredMutations = []
deviant_mutations = []
# deviant_mutations_indices = []
# self.coal.mutations = []
typeCount = [0,0,0,0]
# typeCountList = [list(typeCount)]
# S_seq = [np.matrix(S)]
# inconsistentPairs = []
"""
type 0 : a column with 3 states
type 1 : a column with 2 states and >2 mutations
: (no incompatibility w. other states)
type 2 : creating a column with 2 states and incompatibilities
type 3 : create an invisible state
"""
while mutationCounter - (typeCount[1]+typeCount[3]) < k and mutationCounter < M:
m_index = np.random.randint(M - mutationCounter)
m_k = np.random.randint(1,4)
m_site = np.random.randint(self.L)
site_mut_count[m_site] += 1
mutation = ignoredMutations.pop(m_index) + (m_site, m_k,mutationCounter)
"""
            Now a mutation has the following as its entries:
            m[0] : time of mutation
            m[1] : affected lineage
            m[2] : affected site
            m[3] : mutation type (k - shift mod 4)
            m[4] : a counter -- number of mutations preceding this one
"""
t,branch = mutation[:2]
affectedSequences = self.coal.getStateNoCoppy(t).blocks[branch]
for sequenceIndex in affectedSequences:
S[sequenceIndex,m_site] += m_k
S[sequenceIndex,m_site] %= 4
seq_mut_count[sequenceIndex] += 1
# if site_mut_count[m_site] > 1 and S[sequenceIndex,m_site] != 0:
if site_mut_count[m_site] > 1:
inconsistencyCount += 1
mutation += (inconsistencyCount,)
# inconsistent_columns_new = inconsistentColumnPairs(site_mut_count,S)
# inconsistent_columns_old = inconsistentColumnPairs([ i - int(i==m_site) for i in site_mut_count],S_old)
inconsistent_columns_new = inconsistentColumnPairs(S)
inconsistent_columns_old = inconsistentColumnPairs(S_old)
newInconsistencies = len(inconsistent_columns_new) > len(inconsistent_columns_old)
if len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) > 1:
typeCount[0] += 1
elif len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) == 1:
if newInconsistencies:
typeCount[2] += 1
else:
typeCount[1] += 1
elif len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) == 0:
typeCount[3] += 1
else:
                    raise ValueError("A mutation which defies categorization has occurred. This should not happen!")
# typeCountList.append(list(typeCount))
deviant_mutations.append(mutation)
# deviant_mutations_indices.append(mutationCounter)
mutationCounter += 1
consideredMutations.append(mutation)
# if computeS_seq: S_seq.append(np.matrix(S))
S_old[:,m_site] = S[:,m_site]
# if computeS_seq:
# newSeq = zip(consideredMutations,S_seq[1:])
# newSeq.sort(cmp = lambda x,y: int(np.sign(x[0][0] - y[0][0])))
# consideredMutations = [x[0] for x in newSeq]
# S_seq = S_seq[:1] + [x[1] for x in newSeq]
# else:
# consideredMutations.sort(cmp = lambda x,y: int(np.sign(x[0] - y[0])))
# newSeq = []
consideredMutations.sort(cmp = lambda x,y: int(np.sign(x[0] - y[0])))
# newSeq = []
self.coal.mutations = consideredMutations
return {"S":S,
"mutCount_sites":site_mut_count,
"mutCount_sequences":seq_mut_count,
"Inconsistencies":inconsistencyCount,
"typeCount":typeCount,
"typeCount_arr":np.array(typeCount),
"coalescent" : deepcopy(self.coal),
# "typeCountList" : typeCountList,
# "newSeq" : newSeq,
# "S_seq" : S_seq,
"deviant_mutations": deviant_mutations}
def until_k_mutations(self,k = 10):
'''
        Samples k mutations from self.coal.mutations and discards the rest.
It then proceeds to compute the S-matrix associated with the coalescent.
'''
"""
Step 1, set up everything like we were running
computeAncestralConfiguration
"""
M = len(self.coal.mutations)
S = np.zeros((self.n,self.L),dtype=int)
S_old = np.array(S)
# site_mut_count = np.zeros(self.L,dtype=int)
# seq_mut_count = np.zeros(self.n, dtype=int)
# affectedSites = np.random.randint(self.L, size = M)
# mutationType = np.random.randint(1,4,size = M)
#
'''
        Step 2: randomly iterate over the list of mutations, until the number
        of added mutations equals k.
'''
# inconsistencyCount = 0
mutationCounter = 0
ignoredMutations = list(self.coal.mutations)
consideredMutations = []
# deviant_mutations = []
# deviant_mutations_indices = []
# self.coal.mutations = []
# typeCount = [0,0,0,0]
# typeCountList = [list(typeCount)]
S_seq = [np.matrix(S)]
# inconsistentPairs = []
"""
type 0 : a column with 3 states
type 1 : a column with 2 states and >2 mutations
: (no incompatibility w. other states)
type 2 : creating a column with 2 states and incompatibilities
type 3 : create an invisible state
"""
while mutationCounter < min(M,k):
m_index = np.random.randint(M - mutationCounter)
m_k = np.random.randint(1,4)
m_site = np.random.randint(self.L)
# site_mut_count[m_site] += 1
mutation = ignoredMutations.pop(m_index) + (m_site, m_k,mutationCounter)
"""
            Now a mutation has the following as its entries:
            m[0] : time of mutation
            m[1] : affected lineage
            m[2] : affected site
            m[3] : mutation type (k - shift mod 4)
            m[4] : a counter -- number of mutations preceding this one
"""
t,branch = mutation[:2]
affectedSequences = self.coal.getStateNoCoppy(t).blocks[branch]
for sequenceIndex in affectedSequences:
S[sequenceIndex,m_site] += m_k
S[sequenceIndex,m_site] %= 4
# seq_mut_count[sequenceIndex] += 1
# if site_mut_count[m_site] > 1 and S[sequenceIndex,m_site] != 0:
# if site_mut_count[m_site] > 1:
#
# inconsistencyCount += 1
# mutation += (inconsistencyCount,)
#
# inconsistent_columns_new = inconsistentColumnPairs(site_mut_count,S)
# inconsistent_columns_old = inconsistentColumnPairs([ i - int(i==m_site) for i in site_mut_count],S_old)
#
# newInconsistencies = len(inconsistent_columns_new) > len(inconsistent_columns_old)
#
# if len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) > 1:
# typeCount[0] += 1
#
# if len(set((S[i,m_site] for i in xrange(S.shape[0]))) - set((0,))) == 1:
# if newInconsistencies:
# typeCount[2] += 1
# else:
# typeCount[1] += 1
#
# if S[sequenceIndex,m_site] == 0:
# typeCount[3] += 1
#
# typeCountList.append(list(typeCount))
#
# deviant_mutations.append(mutation)
# # deviant_mutations_indices.append(mutationCounter)
mutationCounter += 1
consideredMutations.append(mutation)
# if computeS_seq: S_seq.append(np.matrix(S))
S_old[:,m_site] = S[:,m_site]
# if computeS_seq:
# newSeq = zip(consideredMutations,S_seq[1:])
# newSeq.sort(cmp = lambda x,y: int(np.sign(x[0][0] - y[0][0])))
# consideredMutations = [x[0] for x in newSeq]
# S_seq = S_seq[:1] + [x[1] for x in newSeq]
# else:
# consideredMutations.sort(cmp = lambda x,y: int(np.sign(x[0] - y[0])))
# newSeq = []
consideredMutations.sort(cmp = lambda x,y: int(np.sign(x[0] - y[0])))
#newSeq = []
self.coal.mutations = consideredMutations
self.sequences = S
#
return {"S":S,
# "mutCount_sites":site_mut_count,
# "mutCount_sequences":seq_mut_count,
# "Inconsistencies":inconsistencyCount,
# "typeCount":typeCount,
# "typeCount_arr":np.array(typeCount),
"coalescent" : deepcopy(self.coal)
# "typeCountList" : typeCountList,
# "newSeq" : newSeq,
# "S_seq" : S_seq,
# "deviant_mutations": deviant_mutations
}
def getS(self):
return np.array(self.sequences, dtype=int)
def getSiteMutationCounts(self):
return np.array(self.site_mutationCount, dtype = int)
def getSeq_mutationCount(self):
return np.array(self.sequences_mutationCount, dtype = int )
def getSegregatingSites(self):
return [j for j in range(self.L) if max(self.sequences[:,j]) > 0]
def getInvisibleSites(self):
return [j for j in range(self.L) if max(self.sequences[:,j])==0 and self.site_mutationCount[j] > 0]
def countSegregatingSites(self):
# counter = np.zeros(self.n, dtype = int)
counter = 0
for j in range(self.L):
if len(filter(lambda x: x!= 0 , self.sequences[:,j])) > 0:
counter += 1
return counter
def countMinimalMutations(self):
counter = 0
for j in range(self.L):
mutations = filter(lambda x: x!= 0 , self.sequences[:,j])
if len(mutations) > 0:
counter += len(set(mutations))
return counter
def countInconsistencies(self):
#return len(inconsistentColumnPairs(site_mut_count = self.site_mutationCount , S = self.sequences) )
return len(inconsistentColumnPairs(S = self.sequences) )
def getInconsistentColumnPairs(self):
#return inconsistentColumnPairs(site_mut_count = self.site_mutationCount , S = self.sequences)
return inconsistentColumnPairs(S = self.sequences)
# def inconsistentColumnPairs(site_mut_count, S, ancestral_type_known = True):
# if S.shape[1] != len(site_mut_count):
# raise ValueError('Number of collumns in S (=%i) does not match length of site_mut_count (=%i)'%(S.shape[1],len(site_mut_count)))
# pairs = []
# affectedSites = filter(lambda i: sum(S[j,i] != 0 for j in xrange(S.shape[0])) > 1, xrange(S.shape[1]) )
# #affectedSites = filter(lambda i: site_mut_count[i] > 0, xrange(len(site_mut_count)) )
# for s1 in affectedSites:
# for s2 in filter(lambda x: x > s1 , affectedSites):
# #if not three_gammete_test(S[:,s1],S[:,s2]):
# if not compatibility_test(S[:,s1], S[:,s2], ancestral_type_known = ancestral_type_known):
# pairs.append((s1,s2))
# return pairs
def inconsistentColumnPairs(S, ancestral_type_known = True):
pairs = []
number_of_non_0_elements = np.apply_along_axis(func1d = sum, axis = 0, arr = (S != 0))
affectedSites = [i for i,n in enumerate(number_of_non_0_elements) if n > 1]
# discriminant = lambda i: sum(S[j,i] != 0 for j in xrange(S.shape[0])) > 1
# affectedSites = filter(discriminant, xrange(S.shape[1]))
for i,s1 in enumerate(affectedSites):
#for s2 in filter(lambda x: x > s1 , affectedSites):
for s2 in affectedSites[(i+1):]:
if not two_char_compatibility_test(S[:,s1], S[:,s2], ancestral_type_known = ancestral_type_known):
pairs.append((s1,s2))
return pairs
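# Example sketch (hypothetical matrix): columns 0 and 1 below jointly show the
# gametes (0,x), (y,0) and (y,x), so they are reported as an incompatible pair.
def _example_inconsistent_column_pairs():
    S_demo = np.array([[1, 0],
                       [0, 2],
                       [1, 2]])
    return inconsistentColumnPairs(S_demo)  # -> [(0, 1)]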
def two_char_compatibility_test(c1,c2, ancestral_type_known = False, ancestral_type = (0,0)):
'''
Takes two columns of equal length (representing characters on a phylogeny),
and returns true if and only if they are compatible (in the sense that the
    number of mutations necessary to place both characters on a single tree is
    the sum of the number of mutations necessary to place each character on a
tree individually)
'''
if len(c1) != len(c2): raise(ValueError('Cannot compare characters of unequal length'))
#convert input to list
c1_list = list(c1)
c2_list = list(c2)
if ancestral_type_known:
c1_list.append(ancestral_type[0])
c2_list.append(ancestral_type[1])
# G_dict = partition_intersection_graph([c1_list,c2_list])
# G_nx = nx.Graph(G_dict['E'])
G_nx = partition_intersection_graph([c1_list,c2_list])
return nx.is_forest(G_nx)
#return len(nx.cycles.cycle_basis(G_nx)) == 0
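# Example sketch (assumed characters): with the ancestral type known to be
# (0, 0), the first pair below fits on one tree with the minimal number of
# mutations, while the second pair does not.
def _example_two_char_compatibility():
    compatible = two_char_compatibility_test([1, 1, 0], [2, 2, 2],
                                             ancestral_type_known=True)    # -> True
    incompatible = two_char_compatibility_test([1, 0, 1], [0, 2, 2],
                                               ancestral_type_known=True)  # -> False
    return compatible, incompatible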
def three_gammete_test(c1,c2):
'''
Takes two columns/or characters.
Returns False if the characters contain all three of the gammetes (not 0, 0)
(not 0, not 0) and (0, not 0). Returns True otherwise.
'''
n = len(c1)
AX_occurs = reduce(lambda x,y: x or y, map(lambda i: c1[i] == 0 and c2[i] != 0, range(n)))
YA_occurs = reduce(lambda x,y: x or y, map(lambda i: c1[i] != 0 and c2[i] == 0, range(n)))
YX_occurs = reduce(lambda x,y: x or y, map(lambda i: c1[i] != 0 and c2[i] != 0, range(n)))
return not(AX_occurs and YA_occurs and YX_occurs)
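# Example sketch (assumed characters): the pair below exhibits all three
# gametes (0, x), (y, 0) and (y, x), so the three-gamete test fails.
def _example_three_gammete_test():
    return three_gammete_test([0, 1, 1], [2, 0, 2])  # -> False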
def compatibility_criterion_1(c1,c2, ancestral_type_known = False, ancestral_type = (0,0)):
'''
    THIS CRITERION IS NECESSARY BUT INSUFFICIENT FOR PAIRWISE COMPATIBILITY!
Takes two columns of equal length (representing characters on a phylogeny),
and returns true if (NOT only if) they are compatible (in the sense that the
    number of mutations necessary to place both characters on a single tree is
    the sum of the number of mutations necessary to place each character on a
tree individually)
'''
if len(c1) != len(c2): raise(ValueError('Cannot compare characters of unequal length'))
states_1 = set(c1)
states_2 = set(c2)
gammetes = set(zip(c1,c2))
if ancestral_type_known:
gammetes.add((0,0)) # gammetes is a set; only has effect when (0,0) not already present.
n_states_1 = len(states_1)
n_states_2 = len(states_2)
n_gammetes = len(set(gammetes))
return n_gammetes <= n_states_1 + n_states_2 - 1 #,states_1,states_2,gammetes
def partition_intersection_graph(chars, output_as_dict = False):
'''
accepts a collection (e.g. a list) of characters, and generates their
partition intersection graph G. The output is either a dictionary, where
G['V'] = nodes, and G['E'] = edges, or an object of the type networkx.Graph.
'''
nodes = []
for i,char in enumerate(chars):
p = to_partition(char)
for block in p:
nodes.append((i,tuple(block)))
edges = []
for i, a in enumerate(nodes):
for b in nodes[i+1:]:
if len(set(a[1]).intersection(set(b[1]))) > 0:
edges.append([a,b])
if output_as_dict:
return {'V':nodes,'E':edges}
else:
return nx.Graph(edges)
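# Example sketch (hypothetical characters): each character contributes one node
# per block of its induced partition, and blocks sharing an index are joined by
# an edge.
def _example_partition_intersection_graph():
    G = partition_intersection_graph([[0, 0, 1], [0, 2, 2]])
    return G.number_of_nodes(), G.number_of_edges()  # -> (4, 3)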
def get_informative_collumns(S):
    '''A column where all but one character state occurs only once is deemed
    'un-informative'. This method returns the indices of all informative columns.
'''
number_of_non_0_elements = np.apply_along_axis(func1d = sum, axis = 0, arr = (S != 0))
segregatingSites = [i for i,n in enumerate(number_of_non_0_elements) if n>1]
#return filter(lambda i: is_informative(S[:,i]), range(S.shape[1]))
return filter(lambda i: is_informative(S[:,i]), segregatingSites)
def get_non_informative_collumns(S):
'''
returns the complement of get_informative_collumns(S)
'''
return filter(lambda i: not is_informative(S[:,i]), range(S.shape[1]))
def get_essential_state_counts(S):
return map(lambda i: number_of_essential_states(S[:,i]), range(S.shape[1]))
def is_informative(char,ancestral_type_known = True, ancestral_type = 0):
''' Determine if a character is informative
Returns: Boolean value
        True iff the partition induced by char (plus the ancestral state, if
        known) has at least two blocks of size > 1, i.e. at least two states
        each occur more than once
'''
char = list(char)
if ancestral_type_known:
char.append(ancestral_type)
n_part = map(len,to_partition(char)) # get block-sizes of partition
if len(n_part) == 1:
return False # len(part)==1 iff single state character
else:
n_part.sort(reverse = True)
return n_part[1] > 1
def number_of_essential_states(char, ancestral_type_known = True, ancestral_type = 0):
'''returns the number of states which appear more than one.
if the ancestral type is indicated as known, the count of the state indicated by
the variable 'ancestral_type' artificially has its count boosted by 1.
'''
char = list(char)
if ancestral_type_known:
char = char + [ancestral_type]
n_part = map(len,to_partition(char))
return sum([b > 1 for b in n_part])
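# Example sketch (assumed character): with the ancestral state 0 counted once
# extra, only state 0 occurs more than once below, so one essential state.
def _example_number_of_essential_states():
    return number_of_essential_states([1, 0, 2])  # -> 1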
def to_partition(char, block_constructor = set):
'''in : a character e.g. ['A','A','G','A','T']
out: a list of sets. each set corresponds to all indices (from 0 to len(char)-1)
with the same state, e.g. [set([0,1,3]),set([2]),set([4])]. This list will be
sorted in order of least elements.'''
states = set(list(char))
partition = [block_constructor( filter( lambda i: char[i] == s, range(len(char)) ) ) for s in states]
partition.sort(key = min)
return partition
def count_minimal_vs_actual_mutations(simulation, ancestral_type_known = True, ret_all= True):
#assert isinstance(simulation,simulator_KingmanFiniteSites)
site_mut_count = simulation.getSiteMutationCounts() # counts actual number of mutations
S = simulation.getS()
if ancestral_type_known:
# add a (0,0,0,...,0)-row to S; representing the sequence of the MRCA
S = np.r_[S,np.zeros( (1,S.shape[1]), dtype = int)]
block_counts = [len( to_partition( S[:,i] ) ) for i in range(S.shape[1])]
min_mut_counts = [i-1 for i in block_counts]
A = np.array(site_mut_count)
B = np.array(min_mut_counts)
C = A - B
if ret_all:
return {'actual':A, 'minimal':B, 'diff':C}
else:
return C
def draw_partition_intersection_graph(representation):
if type(representation) == list: # we have been passed a list of characters
chars = representation
G = partition_intersection_graph(chars)
else:
assert type(representation) == nx.Graph
G = representation
nx.draw(G, node_color = [n[0] for n in G.nodes()])
def fromEdgesToConnectedComponents(pairs):
'''
In: a list of edges (i,j) satisfying i < j
Out: a list of sets [S_1, S_2, ...] corresponding to the verticees of
connected components in the graph encoded by the input.
'''
if len(pairs) == 0:
return []
# vertices_with_duplicates = reduce( lambda x,y: x+y, pairs)
# verticees = list(set(vertices_with_duplicates))
# verticees.sort()
    # we encode each connected component (block) by its smallest vertex.
    # hence j belongs to the block encoded by i if and only if i is the minimum
# index in the connected component containing j.
blocks = {}
block_index = {}
# blocks = dict(verticees[0]:set(verticees[0],))
# block_indices = dict(verticees[0]:verticees[0])
#for v in verticees:
# verticees_seen = set([])
for edge in pairs:
vertices_seen = block_index.keys()
v1 = edge[0]
v2 = edge[1]
if v1 not in vertices_seen and v2 not in vertices_seen:
block_index[v1] = v1
block_index[v2] = v1
blocks[v1] = set([v1,v2])
if v1 in vertices_seen and v2 not in vertices_seen:
block_index[v2] = block_index[v1]
blocks[block_index[v1]].add(v2)
if v1 not in vertices_seen and v2 in vertices_seen:
block_index[v1] = block_index[v2]
blocks[block_index[v2]].add(v1)
if v1 in vertices_seen and v2 in vertices_seen:
if block_index[v1] != block_index[v2]:
if block_index[v1] < block_index[v2]:
index_min = block_index[v1]
index_max = block_index[v2]
else:
index_max = block_index[v1]
index_min = block_index[v2]
block_max = blocks.pop(index_max)
blocks[index_min] = blocks[index_min].union(block_max)
for v in block_max:
block_index[v] = index_min
return blocks.values()
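# Example sketch (hypothetical edge list): two connected components.
def _example_connected_components():
    return fromEdgesToConnectedComponents([(0, 1), (1, 2), (3, 4)])
    # -> [set([0, 1, 2]), set([3, 4])] (block order is not guaranteed)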
def printPairListWithDegrees(pairs,components):
vertices_with_duplicates = reduce( lambda x,y: x+y, pairs)
verticees = list(set(vertices_with_duplicates))
verticees.sort()
degree = dict([(v,0) for v in verticees])
for v1,v2 in pairs:
degree[v1] += 1
degree[v2] += 1
degreeList = degree.items()
degreeList.sort(cmp = lambda x,y: y[1] - x[1])
print 'vertices (%i total):'%len(verticees)
print verticees
print '\nedges (%i total):'%len(pairs)
print set(pairs)
print '\nconnected components:(%i total)'%len(components)
print components
print '\ndegrees (as (vertex,degree) ):'
print degreeList
def generate_plots_for_jotun(n = 8,L = 100,thetaMax = 100,thetaMin=0.001,steps=5,N=100,savePath = ''):
saveFigures = len(savePath) > 0
print_a_larger_than_10 = 10
print_a_50 = True
#Run simulations
thetas = np.logspace(log10(thetaMin),log10(thetaMax),steps)
typeCouunter_simulationAverages = np.zeros((len(thetas),4))
averageInconsistencyBlocksizeFrequency = np.zeros((len(thetas),L))
for i,theta in enumerate(thetas):
simulations = [simulator_KingmanFiniteSites(n,float(theta)/2,L) for z in range(N)]
typeCounter = np.zeros((N,4))
for j in range(N):
s = simulations[j]
S = s.getS()
for column in range(S.shape[1]):
typeCounter[j,len(set(S[:,column]))-1] += 1
incompatible_pairs = s.getInconsistentColumnPairs()
incompatible_components = fromEdgesToConnectedComponents(incompatible_pairs)
# print incompatible_pairs
# print incompatible_components
incompatability_group_sizes = map(len,incompatible_components)
#incompatability_group_sizes = [len(c) for c in incompatible_components]
for k in incompatability_group_sizes:
if print_a_larger_than_10 > 0 and k >= 10:
print 'Incompatitility graph: generated with (n,L,theta) = (%i,%i,%10.5f):\n'%(n,L,theta)
printPairListWithDegrees(incompatible_pairs,incompatible_components)
print_a_larger_than_10 -= 1
print '-'*80
averageInconsistencyBlocksizeFrequency[i][k -1] += 1
typeCouunter_simulationAverages[i] = [sum(typeCounter[:,k]) / float(N) for k in range(4)]
averageInconsistencyBlocksizeFrequency[i] *= 1.0/N
#generate plot of frequency of number of dirrerent characters:
fig = pl.figure(figsize=(20,5))
pl.suptitle('%i sequences of length %i; %i simulations per point '%(n,L,N))
pl.subplot(121)
pl.xlabel(r"$\frac{\theta}{L}$")
pl.ylabel("mean frequency of loci with k types")
pl.xscale('log')
for k in (1,2,3,4):
pl.plot(thetas/L , typeCouunter_simulationAverages[:,k-1], label = '$k = %i$'%k)
pl.legend(loc='best')
#generate plot of frequency of incompatability-blocksizes
pl.subplot(122)
pl.xlabel(r"$\frac{\theta}{L}$")
pl.ylabel("mean frequency of maximal\nincompatible groups of size s")
pl.xscale('log')
blocksizelist = [2,3,4,5,10,25,50]
for k in blocksizelist:
pl.plot(thetas/L , averageInconsistencyBlocksizeFrequency[:,k-1],label = '$s = %i$'%k)
pl.legend(loc='best')
fig.tight_layout()
fig.subplots_adjust(top=0.88)
if saveFigures:
pl.savefig(savePath+"jotunPlot__L_%i__N_%i__n_%i.pdf"%(L,N,n))
pl.savefig(savePath+"jotunPlot__L_%i__N_%i__n_%i.png"%(L,N,n))
pl.savefig(savePath+"jotunPlot__L_%i__N_%i__n_%i.eps"%(L,N,n))
pl.savefig(savePath+"jotunPlot__L_%i__N_%i__n_%i.svg"%(L,N,n))
csv_path = savePath+"jotunPlot__L_%i__N_%i__n_%i"%(L,N,n)
#csv_out = open(csv_path,w)
array1_out = np.c_[thetas/L , typeCouunter_simulationAverages]
np.savetxt(csv_path+'_subfig_1.csv',array1_out,fmt='%10.10f',delimiter = ', ')
array2_out = np.c_[thetas/L , averageInconsistencyBlocksizeFrequency[:,blocksizelist]]
np.savetxt(csv_path+'_subfig_2.csv',array2_out,fmt='%10.10f',delimiter = ', ')
# for vector in [thetas/L]+[[]]+[typeCouunter_simulationAverages[:,k-1] for k in (1,2,3,4)]+[[]]+[averageInconsistencyBlocksizeFrequency[:,k-1] for k in range(2,11)]:
# csv_out.write(', '.join(['%.10f'%x for x in vector]))
# csv_out.write('\n')
# csv_out.close()
pl.show()
def generate_plot_1(n = 8,L = 100,thetaMax = 10,thetaMin=0.01,steps=20,N=100,savePath = ''):
saveFigures = len(savePath) > 0
#Run simulations
h = (float(thetaMax) - thetaMin)/steps
thetas = np.arange(thetaMin,thetaMax,h)+h
avgRate = np.zeros(len(thetas))
inconsistencyCount = np.zeros(len(thetas))
invisibleSiteCount = np.zeros(len(thetas))
for i,theta in enumerate(thetas):
simulations = [simulator_KingmanFiniteSites(n,float(theta)/2,L) for z in range(N)]
        rates = []
        inconsistencies = []
        invisibleSites = 0
        segCounter = 0
for s in simulations:
# for i in range(N):
# s = simulations[i]
minimalMutations = s.countMinimalMutations()
actualMutations = len(s.coal.mutations)
segregatingSites = s.countSegregatingSites()
# siteMutationCounts = s.getSiteMutationCounts()
if minimalMutations > 0:
rates.append( float(actualMutations) / minimalMutations )
inconsistencies.append(s.countInconsistencies())
invisibleSites += len(s.getInvisibleSites())/float(segregatingSites)
segCounter += 1
if segCounter > 0:
invisibleSiteCount[i] = invisibleSites/float(segCounter)
else:
invisibleSiteCount[i] = 0
avgRate[i] = np.average(rates)
inconsistencyCount[i] = np.average(inconsistencies) / binom(L,2)
#generate plot 1
pl.figure()
label = "L,N,n = %i,%i,%i"%(L,N,n)
pl.xlabel(r"$\frac{\theta}{L}$")
pl.ylabel(r"(actual # mutations) / (# visible mutations)")
pl.plot(thetas/L , avgRate , color='blue' , label=label)
pl.legend(loc='upper left')
if saveFigures: pl.savefig(savePath+"plot1__L_%i__N_%i__n_%i.pdf"%(L,N,n))
#generate plot 2
pl.figure()
# label = "(x;y) = (theta/L ; fraction of inconsistent columns)\nL,N,n = %i,%i,%i"%(L,N,n)
label = "L,N,n = %i,%i,%i"%(L,N,n)
pl.xlabel(r"$\frac{\theta}{L}$")
pl.ylabel(r"#inconsistent column-pairs / $\binom{L}{2}$")
pl.plot(thetas/L, inconsistencyCount, color = "red", label = label)
pl.legend(loc='upper left')
if saveFigures: pl.savefig(savePath+"plots/plot2__L_%i__N_%i__n_%i.pdf"%(L,N,n))
#generate plot 3
pl.figure()
# label = "(x;y) = (theta/L ; fraction of invisible sites)\nL,N,n = %i,%i,%i"%(L,N,n)
label = "L,N,n = %i,%i,%i"%(L,N,n)
pl.xlabel(r"$\frac{\theta}{L}$")
pl.ylabel(r"#invisible sites / #segregating sites")
pl.plot(thetas/L, invisibleSiteCount, color = "green", label = label)
pl.legend(loc='upper right')
if saveFigures: pl.savefig(savePath+"plots/plot3__L_%i__N_%i__n_%i.pdf"%(L,N,n))
pl.show()
#def generate_plot_2(n,L,theta,N=100):
# simulations = [simulator_KingmanFiniteSites(n,float(theta)/2,L) for z in range(N)]
# count_inconsistencies_total = 0
# for i,s in enumerate(simulations):
# count_inconsistencies = s.countInconsistColumnPairs()
# count_inconsistencies_total += count_inconsistencies
def runTests():
# generate_plot_1(n=10,L=50,thetaMax=50,steps=50,N=1000)
generate_plot_1(n=20,L=200,thetaMax=200,steps=20,N=1000)
def simulateUntillXMutations(N = 1000, n = 20, L = 100, mutRate = 200,printFirst10 = False,X = 2):
K_list = [simulator_KingmanFiniteSites(n,mutRate,L,False) for i in xrange(N)]
totalTypeCount = np.array((0,0,0,0))
misses = 0
k_res_List = []
for K in K_list:
res = K.untillFirstXInconsistencies(X = X)
        #Guarantee that we actually got enough mutations.
if res["Inconsistencies"] == X:
totalTypeCount += res["typeCount_arr"]
k_res_List.append([K,res])
else:
misses += 1
if printFirst10:
for K in K_list[:10]:
print chronology(K)
print ""
N_eff = N - misses
return totalTypeCount,N_eff,k_res_List
def chronology(K):
'''
Outputs a String of all events in K, sorted in chronological order
'''
events = list(K.coal.jumps)
events.extend(K.coal.mutations)
events.sort(cmp = lambda x,y: int(np.sign(x[0] - y[0])))
return "\n".join(map(eventToString,events))
def eventToString(e):
if str(type(e[1])) == "<class 'libCoal.partition'>":
return "t = %1.4f\t"%e[0] + str(e[1])
if isinstance(e[1], int):
outStr = "t = %1.4f\t"%e[0]
if len(e) > 5: outStr += " *** Inconsistency number %i *** "%e[5]
outStr += "Mutation (%i in order added"%e[4]
outStr += "; +%i-mod4 at site %i affecting block %i)"%(e[3],e[2],e[1])
return outStr
else:
print "WTF!",e
def scatterplot_index_and_time_of_abnormal_mutations(N = 1000 , L = 100, n = 20):
theta = 1.2 * L
typeCounts,N_eff,k_res_list = simulateUntillXMutations(N = N, n = n , L = L, mutRate=theta, printFirst10= False)
t1, I1 = np.zeros(N_eff,dtype = float), np.zeros(N_eff,dtype = int)
t2, I2 = np.zeros(N_eff,dtype = float), np.zeros(N_eff,dtype = int)
i = 0
for K,res in k_res_list:
mutations = res["deviant_mutations"]
t1[i], I1[i] = mutations[0][0],mutations[0][4]
t2[i], I2[i] = mutations[1][0],mutations[1][4]
i += 1
pl.figure()
pl.subplot(1,2,1)
pl.scatter(t1,t2,marker="+",s=10)
pl.axis('scaled')
pl.xlim(xmin = 0.0)
pl.ylim(ymin = 0.0)
pl.title("(coalescent) time of mutations")
pl.xlabel(r"$t_1$")
pl.ylabel(r"$t_2$")
pl.subplot(1,2,2)
pl.scatter(I1,I2,marker="+",s=10)
pl.xlim(xmin = 0.0)
pl.ylim(ymin = 0.0)
pl.axis('scaled')
pl.xlabel(r"$i_1$")
pl.ylabel(r"$i_2$")
pl.title("Index of mutations\n(in order of simulation)")
pl.suptitle("N = %i L = %i n = %i"%(N_eff,L,n))
filename_str = "plots/scatter_mut1_vs_2/scatter_mut1_vs_2_N_%i_L_%i_n_%i"%(N_eff,L,n)
try:
pl.savefig(filename_str+".pdf")
pl.savefig(filename_str+".png")
pl.savefig(filename_str+".ps")
pl.savefig(filename_str+".svg")
pl.savefig(filename_str+".eps")
except Exception:
print "could not save in all formats"
pl.draw()
def generatePlot_of_mutationTypes(N = 1000,L = 100, n = 20, printFirsrst10 = False,show = False, X=2):
theta = 2.0 * L
typeCounts,N_eff,K_list = simulateUntillXMutations(N = N, n = n , L = L, mutRate=theta, printFirst10= printFirsrst10, X = X)
#run simulations
#typeCounts,N_eff = simulateUntillXMutations(N = N, n = n , L = L, mutRate=theta, printFirst10= printFirsrst10,X = X)[:2]
#plot simulation-results
width = 0.8
#color = ("cyan","orange","grey","magenta")
color = ("white","white","white","white")
hatch = ("//", ".","\\")
left = np.arange(1,5) - width/2.0
pl.figure()
bars = pl.bar(left,typeCounts,width = width, color = color)
for bar,pattern in zip(bars[1:],hatch):
bar.set_hatch(pattern)
# pl.xlabel("Type of incompatibility")
pl.xticks(np.arange(1,5),("3 types","2 types\n2 mutations\n3 gammete test passes","2 types\n3 gammete test fails","1 type\n2 mutations"))
pl.ylabel("frequency")
pl.title("Result of %i simulations stopped after %i events\nsequences = %i sequence-length = %i"%(N_eff,X,n,L))
# pl.tight_layout()
pl.draw()
filename_str = "plots/bars_stoppedProcess/bar_typeFrequencies_X_%i_N_%i_L_%i_n_%i_no_color"%(X,N_eff,L,n)
try:
pl.savefig(filename_str+".pdf")
pl.savefig(filename_str+".png")
pl.savefig(filename_str+".ps")
pl.savefig(filename_str+".svg")
pl.savefig(filename_str+".eps")
except Exception:
print "could not save in all formats (pdf,png,ps,svg,eps)"
if show:
pl.show()
np.savetxt(filename_str+".csv", np.array(typeCounts,ndmin=2),fmt='%d', delimiter = ', ',header = "(a) 3 types, (b) 2 types 2 mutations 3 gammete test passes, (c) 2 types 3 gammete test fails, (d) 1 type 2 mutations")
def run_generatePlot_of_mutationTypes(arglist = [(1000,100,20)],X = 2):
for args in arglist:
N,L,n = args
print "%s Generating plots (Barcharts) for N,L,n,X = %i,%i,%i,%i"%((ctime(),)+tuple(args)+(X,))
generatePlot_of_mutationTypes(N=N,L=L,n=n,printFirsrst10 = False,show = False, X = X)
def run_generateScatterplots(arglist = [(100,100,20)]):
for args in arglist:
N,L,n = args
print "Generating plots (scatterplts) for N,L,n = %i,%i,%i"%tuple(args)
scatterplot_index_and_time_of_abnormal_mutations(N,L,n)
| gpl-2.0 |
brguez/TEIBA | src/python/activityStatus_VAF_association.py | 1 | 3005 | #!/usr/bin/env python
#coding: utf-8
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import time
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy import stats
import seaborn as sns
import scipy
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('activityRateFile', help='')
parser.add_argument('alleleFreqFile', help='')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
activityRateFile = args.activityRateFile
alleleFreqFile = args.alleleFreqFile
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "activityRateFile: ", activityRateFile
print "alleleFreqFile: ", alleleFreqFile
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1) Make activity rate dataframe
#####################################
dfActivityRate = pd.read_csv(activityRateFile, header=0, index_col=0, sep='\t')
print "dfActivityRate: ", dfActivityRate
#### 2) Make allele frequency across ancestries dataframe
##########################################################
## Load allele count matrix into dataframe
dfAlleleFreqAncestry = pd.read_csv(alleleFreqFile, header=0, index_col=0, sep='\t')
print "dfAlleleFreqAncestry: ", dfAlleleFreqAncestry
#### 3) none/low vs moderate/high activity VAF comparison
############################################################
### Prepare Data
dataframe = pd.concat([dfActivityRate["activityStatus"], dfAlleleFreqAncestry["PCAWG"]], axis=1)
print "dataframe: ", dataframe
dataframe1 = dataframe[(dataframe['activityStatus']=="none") | (dataframe['activityStatus']=="low")]
dataframe1['activityStatus'] = "none/low"
dataframe2 = dataframe[(dataframe['activityStatus']=="moderate") | (dataframe['activityStatus']=="high")]
dataframe2['activityStatus'] = "moderate/high"
dataframe3 = pd.concat([dataframe1, dataframe2])
x = dataframe3[(dataframe3['activityStatus']=="none/low")]["PCAWG"].values
y = dataframe3[(dataframe3['activityStatus']=="moderate/high")]["PCAWG"].values
mannWhitneyU = stats.mannwhitneyu(x, y, alternative='two-sided')
Ustat = mannWhitneyU[0]
pvalue = round(mannWhitneyU[1], 4)
## Make boxplot:
#################
title = "Mann-Whitney U = " + str(Ustat) + ", P = " + str(pvalue)
fig = plt.figure(figsize=(5,6))
fig.suptitle(title, fontsize=14)
ax = sns.boxplot(x="activityStatus", y="PCAWG",data=dataframe3, width=0.5, showfliers=False)
ax = sns.stripplot(x="activityStatus", y="PCAWG", data=dataframe3, jitter=True, color=".3")
ax.set(ylim=(-0.1, 1.1))
ax.set_xlabel('')
# Add mann whitney U statistic and p-value to the plot:
## Save figure
fileName = outDir + "/low_vs_high_activity_VAF_boxPlot.pdf"
plt.savefig(fileName)
sys.exit(1)
| gpl-3.0 |
yunfeilu/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
trnet4334/img_colorization | landscape_colorizer/colorization_use_pretrained_conv_layers.py | 1 | 6440 | from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D, MaxPooling2D,Conv2D
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import os, shutil
import theano
from PIL import Image
from numpy import *
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from skimage.color import rgb2lab
import skimage.color as color
# General parameters
img_rows, img_cols = 96, 96 # Image dimensions after resizing
bin_num = 20 # For classification: since the a and b channels contain continuous values from -100 to 100, we bin them into several classes
input_channels = 1 # The paper uses 3 duplicated channels as input since the pre-trained network has 3 channels, but we can use 1 if we are not using VGG-16
test_img_num = 40 # Use first-n files in the data folder to test the model
lab_channels = ['l', 'a', 'b']
# Cnn model parameters
era = 1000
epoch = 3
batch_size = 64
validation_split = 0.1
# Paths
img_input_path = "./combined/"
img_output_path = "./predict_output_pretrained_conv_layers/"
img_reconstructed_path = "./reconstructed_input_after_bining/"
img_channels_path = "./channels_img/"
def save_img_of_channel(img_lab, channel, name="img"):
img_lab_cp = img_lab.copy()
# Delete the rest channels by setting them to 0
if channel == 'l':
img_lab_cp[:,:,1:] = 0
elif channel == 'a':
img_lab_cp[:,:,0] = 0
img_lab_cp[:,:,2] = 0
elif channel == 'b':
img_lab_cp[:,:,:2] = 0
else:
print "[ERROR!!] The channel should be 'l', 'a' or 'b' "
return
img_rgb_channel = color.lab2rgb(img_lab_cp)
im = Image.fromarray((img_rgb_channel * 255).astype(uint8))
im.save(img_channels_path + name + "_" + channel + ".jpg", "jpeg")
def save_image_by_channels(img_lab, name):
# Seperate the image channels L a* and b*
for i in xrange(0, len(lab_channels)):
img = img_lab[:,:,i]
save_img_of_channel(img_lab, lab_channels[i], name=name)
def reconstruct_image_by_lab_channels(img_l, img_a, img_b):
img = array([img_l.T, img_a.T, img_b.T]).T
img_rgb_channel = color.lab2rgb(img)
im = Image.fromarray((img_rgb_channel * 255).astype(uint8))
return im
def get_img_ab_binned(img_lab):
img_a = img_lab[:,:,1]
img_b = img_lab[:,:,2]
img_a_binned = ((img_a + 100) * bin_num) / 200
img_b_binned = ((img_b + 100) * bin_num) / 200
return img_a_binned.astype(int), img_b_binned.astype(int)
def get_img_ab_unbinned(img_a_binned, img_b_binned):
img_a_unbinned = ((img_a_binned * 200) / bin_num) - 100.0
img_b_unbinned = ((img_b_binned * 200) / bin_num) - 100.0
return img_a_unbinned, img_b_unbinned
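# Example sketch (assumed bin_num = 20): unbinning maps integer bins back onto
# the a*/b* range [-100, 100], so a bin/unbin round trip loses at most one bin
# width (200 / bin_num units).
def _example_unbinning():
    a_unbinned, b_unbinned = get_img_ab_unbinned(np.array([0, 10, 19]),
                                                 np.array([0, 10, 19]))
    return a_unbinned, b_unbinned  # -> each is [-100., 0., 90.]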
def save_input_image_after_bining(img_lab, name='img'):
# Use this function to test how bin_num affect the original input image
img_a_binned, img_b_binned = get_img_ab_binned(img_lab)
img_a_unbinned, img_b_unbinned = get_img_ab_unbinned(img_a_binned, img_b_binned)
im = reconstruct_image_by_lab_channels(img_lab[:,:,0], img_a_unbinned, img_b_unbinned)
im.save(img_reconstructed_path + name + "_reconstructed_after_bining.jpg", "jpeg")
def get_duplicated_l_channel(img_l, channels):
img_l_duplicated = []
for i in xrange(channels):
img_l_duplicated.append(img_l.T)
result = array(img_l_duplicated).T
return result
''' Start Here '''
imlist = os.listdir(img_input_path)
imlist.sort()
# ''' For playing with lab images and also testing the affect of bining '''
for i in xrange(test_img_num):
# Save image of each channel (l, a, b)
img_rgb = array(Image.open(img_input_path + imlist[i]).resize((img_rows,img_cols)))
img_lab = rgb2lab(img_rgb)
save_image_by_channels(img_lab, imlist[i])
# Test the color distortion of input image after bining
save_input_image_after_bining(img_lab, name = imlist[i])
''' For training and testing cnn model '''
X = [] # Training inputs
X_l = [] # Keep the l channel to reconstruct the image from lab to rgb
Y = [] # Training labels
count = 1;
for img in imlist:
print "loading data .... " + str(count) + "/" +str(len(imlist))
img_rgb = array(Image.open(img_input_path + img).resize((img_rows,img_cols)))
img_lab = rgb2lab(img_rgb)
img_a_binned, img_b_binned = get_img_ab_binned(img_lab)
img_y = np.append(img_a_binned.flatten(), img_b_binned.flatten())
y = np_utils.to_categorical(img_y, bin_num)
    X.append(get_duplicated_l_channel(img_lab[:,:,0], input_channels)) # The paper uses 3 duplicated l channels as network input
X_l.append(img_lab[:,:,0])
Y.append(y)
count += 1
X = array(X)
Y = array(Y)
X_l = array(X_l)
print X.shape
print Y.shape
# Use the trained model to do prediction
# Load model from json file
json_file = open('model(80).json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# Load weights into model
model.load_weights("weight(80).hdf5")
for i in xrange(1, 11, 1):
model.layers.pop()
model.layers[-1].outbound_nodes = []
model.outputs = [model.layers[-1].output]
model.summary()
for layer in model.layers:
layer.trainable = False
model.add(Dense(1024, name ='dd'))
model.add(BatchNormalization(name='nn'))
model.add(Activation('relu', name='rr'))
model.add(Dropout(0.5, name='dr'))
model.add(Dense(img_rows * img_cols * 2 * bin_num, name='den'))
model.add(Reshape((img_rows * img_cols * 2, bin_num)))
model.add(Activation('softmax', name="act"))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=["acc"])
model.summary()
for j in xrange(era):
hist = model.fit(X[test_img_num:], Y[test_img_num:], batch_size=batch_size, nb_epoch=epoch,
verbose=1, validation_split=validation_split, shuffle=True)
if j % 10 == 0:
for i in xrange(0, test_img_num):
result = model.predict_classes(X[i].flatten().reshape(1, img_rows, img_cols, input_channels))
print result
reshaped = result.reshape(2, img_rows, img_cols)
a, b = get_img_ab_unbinned(reshaped[0], reshaped[1])
im = reconstruct_image_by_lab_channels(X_l[i], a, b)
im.save(img_output_path + imlist[i] + "_predicted_" + "era_" +str(j) + ".jpg", "jpeg")
model_json = model.to_json()
with open("colorize_with_pretrain.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("colorize_with_pretrain.hdf5", overwrite=True)
| mit |
gromitsun/sim-xrf-py | others/snr_90_180/snr_0_90_180_as.py | 1 | 1155 | import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
plt.figure()
ax1 = plt.gca()
ax2 = ax1.twinx()
openings = np.load('snr_as0_5um.npz')['openings']
snr = np.load('snr_as0_5um.npz')['snr']
p2b = np.load('snr_as0_5um.npz')['p2b']
ax1.plot(openings,snr, 'r-', label=r'0$^\circ$')
ax2.plot(openings,p2b, 'r--', label=r'0$^\circ$')
openings = np.load('snr_as90_5um.npz')['openings']
snr = np.load('snr_as90_5um.npz')['snr']
p2b = np.load('snr_as90_5um.npz')['p2b']
ax1.plot(openings,snr, 'b-', label=r'90$^\circ$')
ax2.plot(openings,p2b, 'b--', label=r'90$^\circ$')
openings = np.load('snr_as180_5um.npz')['openings']
snr = np.load('snr_as180_5um.npz')['snr']
p2b = np.load('snr_as180_5um.npz')['p2b']
ax1.plot(openings,snr, 'g-', label=r'180$^\circ$')
ax2.plot(openings,p2b, 'g--', label=r'180$^\circ$')
ax1.set_ylabel(r'S/N (solid)')
ax2.set_ylabel(r'P/B (dashed)')
ax2.set_yscale('log')
ax1.legend(loc=0,ncol=3)
ax1.set_xlabel(r'Collection semi-angle $\phi$ (deg)')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2,2))
# ax2.ticklabel_format(style='sci', axis='y', scilimits=(-2,2))
plt.show()
| mit |
mugizico/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
kcavagnolo/astroML | book_figures/chapter8/fig_regression_mu_z.py | 3 | 3813 | """
Cosmology Regression Example
----------------------------
Figure 8.2
Various regression fits to the distance modulus vs. redshift relation for a
simulated set of 100 supernovas, selected from a distribution
:math:`p(z) \propto (z/z_0)^2 \exp[-(z/z_0)^{1.5}]` with :math:`z_0 = 0.3`.
Gaussian basis functions have 15 Gaussians evenly spaced between z = 0 and 2,
with widths of 0.14. Kernel regression uses a Gaussian kernel with width 0.1.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from astroML.cosmology import Cosmology
from astroML.datasets import generate_mu_z
from astroML.linear_model import LinearRegression, PolynomialRegression,\
BasisFunctionRegression, NadarayaWatson
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate data
z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0)
cosmo = Cosmology()
z = np.linspace(0.01, 2, 1000)
mu_true = np.asarray([cosmo.mu(zi) for zi in z])
#------------------------------------------------------------
# Define our classifiers
basis_mu = np.linspace(0, 2, 15)[:, None]
basis_sigma = 3 * (basis_mu[1] - basis_mu[0])
subplots = [221, 222, 223, 224]
classifiers = [LinearRegression(),
PolynomialRegression(4),
BasisFunctionRegression('gaussian',
mu=basis_mu, sigma=basis_sigma),
NadarayaWatson('gaussian', h=0.1)]
text = ['Straight-line Regression',
'4th degree Polynomial\n Regression',
'Gaussian Basis Function\n Regression',
'Gaussian Kernel\n Regression']
# number of constraints of the model. Because
# Nadaraya-Watson is just a weighted mean, it has only one constraint
n_constraints = [2, 5, len(basis_mu) + 1, 1]
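# These counts are used below as the effective number of fitted parameters when
# normalizing the goodness of fit, i.e. chi2_dof = chi2 / (N - n_constraints[i]).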
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(subplots[i])
# fit the data
clf = classifiers[i]
clf.fit(z_sample[:, None], mu_sample, dmu)
mu_sample_fit = clf.predict(z_sample[:, None])
mu_fit = clf.predict(z[:, None])
chi2_dof = (np.sum(((mu_sample_fit - mu_sample) / dmu) ** 2)
/ (len(mu_sample) - n_constraints[i]))
ax.plot(z, mu_fit, '-k')
ax.plot(z, mu_true, '--', c='gray')
ax.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1)
ax.text(0.5, 0.05, r"$\chi^2_{\rm dof} = %.2f$" % chi2_dof,
ha='center', va='bottom', transform=ax.transAxes)
ax.set_xlim(0.01, 1.8)
ax.set_ylim(36.01, 48)
ax.text(0.05, 0.95, text[i], ha='left', va='top',
transform=ax.transAxes)
if i in (0, 2):
ax.set_ylabel(r'$\mu$')
else:
ax.yaxis.set_major_formatter(plt.NullFormatter())
if i in (2, 3):
ax.set_xlabel(r'$z$')
else:
ax.xaxis.set_major_formatter(plt.NullFormatter())
plt.show()
| bsd-2-clause |
jrabenoit/skellify | mltools.py | 1 | 7990 | #!/usr/bin/env python
from sklearn import svm
from sklearn import naive_bayes
from sklearn import neighbors
from sklearn import ensemble
from sklearn import linear_model
import copy
#.fit fits the model to the dataset in brackets. Score tests the fitted model on data.
def GauNaiBay(X_train, X_test, y_train, y_test):
for i in range(0,len(X_train)):
gnb = naive_bayes.GaussianNB()
gnb.fit(X_train[i], y_train[i])
X_train[i] = gnb.score(X_train[i], y_train[i])
X_test[i] = gnb.score(X_test[i], y_test[i])
return X_train, X_test
def KNeighbors(X_train, X_test, y_train, y_test):
for i in range(0,len(X_train)):
knc = neighbors.KNeighborsClassifier()
knc.fit(X_train[i], y_train[i])
X_train[i] = knc.score(X_train[i], y_train[i])
X_test[i] = knc.score(X_test[i], y_test[i])
return X_train, X_test
def CSupSvc(X_train, X_test, y_train, y_test):
for i in range(len(X_train)):
csvm = svm.SVC()
csvm.fit(X_train[i], y_train[i])
X_train[i] = csvm.score(X_train[i], y_train[i])
X_test[i] = csvm.score(X_test[i], y_test[i])
return X_train, X_test
def RandomForest(X_train, X_test, y_train, y_test):
for i in range(len(X_train)):
rf = ensemble.RandomForestClassifier()
rf.fit(X_train[i], y_train[i])
X_train[i] = rf.score(X_train[i], y_train[i])
X_test[i] = rf.score(X_test[i], y_test[i])
return X_train, X_test
def ExtraTrees(X_train, X_test, y_train, y_test):
for i in range(len(X_train)):
rf = ensemble.ExtraTreesClassifier()
rf.fit(X_train[i], y_train[i])
X_train[i] = rf.score(X_train[i], y_train[i])
X_test[i] = rf.score(X_test[i], y_test[i])
return X_train, X_test
def LinearSgd(X_train, X_test, y_train, y_test):
for i in range(len(X_train)):
sgd = linear_model.SGDClassifier()
sgd.fit(X_train[i], y_train[i])
X_train[i] = sgd.score(X_train[i], y_train[i])
X_test[i] = sgd.score(X_test[i], y_test[i])
return X_train, X_test
ml_func_dict = {
# 'GauNaiBay':GauNaiBay,
# 'KNeighbors':KNeighbors,
# 'CSupSvc':CSupSvc,
'RandomForest':RandomForest
# 'ExtraTrees':ExtraTrees
# 'LinearSgd':LinearSgd
}
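# Rough usage sketch (variable names here are hypothetical): each enabled entry
# maps a name to a function that takes per-fold train/test splits, replaces each
# fold with its accuracy score, and returns the per-fold train/test scores, e.g.
#   for name, func in ml_func_dict.items():
#       train_scores, test_scores = func(X_train_folds, X_test_folds,
#                                        y_train_folds, y_test_folds)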
'''
def LSvm_maker(penalty,loss,dual,C):
return lambda fX_train, fX_test, fy_train, fy_test: LSvm_base(fX_train, fX_test, fy_train, fy_test, penalty,loss,dual,C)
#remember to change the final run values of C as well, if you change this one
for C in [0.01,0.03,0.1,0.3,1.0,3.0,10.0,30.0]:
ml_func_dict['LSvmL1_C'+str(C).replace(".","p")] = LSvm_maker(penalty='l1',loss=None,dual=False,C=C)
ml_func_dict['LSvmL2_C'+str(C).replace(".","p")] = LSvm_maker(penalty='l2',loss='hinge',dual=True,C=C)
def LSvm_base(fX_train, fX_test, fy_train, fy_test, penalty='l1', loss=None, dual=False, C=1.0):
# Note: c argument not actually used yet
lX_train = copy.copy(fX_train)
lX_test = copy.copy(fX_test)
ly_train = copy.copy(fy_train)
ly_test = copy.copy(fy_test)
for i in range(0,len(fX_train)):
if not loss:
lsvm = svm.LinearSVC(penalty=penalty, dual=dual, C=C)
else:
lsvm = svm.LinearSVC(penalty=penalty, dual=dual, loss=loss)
lsvm.fit(lX_train[i], ly_train[i])
lX_train[i] = lsvm.score(lX_train[i], ly_train[i])
lX_test[i] = lsvm.score(lX_test[i], ly_test[i])
return lX_train, lX_test
'''
################################################################################
################################################################################
def GauNaiBayFinal(X_train, X_test, y_train, y_test):
gnb = naive_bayes.GaussianNB()
gnb.fit(X_train, y_train)
lX_train = gnb.score(X_train, y_train)
lX_test = gnb.score(X_test, y_test)
lX_train_predict= gnb.predict(X_train)
lX_test_predict= gnb.predict(X_test)
ly_train_labels= y_train
ly_test_labels= y_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
def KNeighborsFinal(X_train, X_test, y_train, y_test):
knc = neighbors.KNeighborsClassifier()
knc.fit(X_train, y_train)
lX_train = knc.score(X_train, y_train)
lX_test = knc.score(X_test, y_test)
lX_train_predict= knc.predict(X_train)
lX_test_predict= knc.predict(X_test)
ly_train_labels= y_train
ly_test_labels= y_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
def CSupSvcFinal(X_train, X_test, y_train, y_test):
csvm = svm.SVC()
csvm.fit(X_train, y_train)
lX_train = csvm.score(X_train, y_train)
lX_test = csvm.score(X_test, y_test)
lX_train_predict= csvm.predict(X_train)
lX_test_predict= csvm.predict(X_test)
ly_train_labels= y_train
ly_test_labels= y_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
def RandomForestFinal(X_train, X_test, y_train, y_test):
rf = ensemble.RandomForestClassifier()
rf.fit(X_train, y_train)
lX_train = rf.score(X_train, y_train)
lX_test = rf.score(X_test, y_test)
lX_train_predict= rf.predict(X_train)
lX_test_predict= rf.predict(X_test)
ly_train_labels= y_train
ly_test_labels= y_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
def ExtraTreesFinal(X_train, X_test, y_train, y_test):
rf = ensemble.ExtraTreesClassifier()
rf.fit(X_train, y_train)
lX_train = rf.score(X_train, y_train)
lX_test = rf.score(X_test, y_test)
lX_train_predict= rf.predict(X_train)
lX_test_predict= rf.predict(X_test)
ly_train_labels= y_train
ly_test_labels= y_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
def LinearSgdFinal(X_train, X_test, y_train, y_test):
sgd = linear_model.SGDClassifier()
sgd.fit(X_train, y_train)
lX_train = sgd.score(X_train, y_train)
lX_test = sgd.score(X_test, y_test)
lX_train_predict= sgd.predict(X_train)
lX_test_predict= sgd.predict(X_test)
ly_train_labels= y_train
ly_test_labels= y_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
ml_func_dict_final = {
# 'GauNaiBay':GauNaiBayFinal,
# 'KNeighbors':KNeighborsFinal,
# 'CSupSvc':CSupSvcFinal,
'RandomForest':RandomForestFinal
# 'ExtraTrees':ExtraTreesFinal
# 'LinearSgd':LinearSgdFinal
}
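# The '*Final' variants above operate on a single train/test split (not lists of
# folds) and additionally return predictions and true labels, e.g. (sketch with
# hypothetical array names):
#   (train_acc, test_acc, train_pred, test_pred,
#    train_labels, test_labels) = ml_func_dict_final['RandomForest'](X_tr, X_te, y_tr, y_te)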
'''
def LSvm_maker_final(penalty,loss,dual,C):
return lambda fX_train, fX_test, fy_train, fy_test: LSvm_base_final(fX_train, fX_test, fy_train, fy_test, penalty,loss,dual,C)
# Expanded C: [0.01,0.03,0.1,0.3,1.0,3.0,10.0,30.0]
for C in [0.01,0.03,0.1,0.3,1.0,3.0,10.0,30.0]:
ml_func_dict_final['LSvmL1_C'+str(C).replace(".","p")] = LSvm_maker_final(penalty='l1',loss=None,dual=False,C=C)
ml_func_dict_final['LSvmL2_C'+str(C).replace(".","p")] = LSvm_maker_final(penalty='l2',loss='hinge',dual=True,C=C)
def LSvm_base_final(fX_train, fX_test, fy_train, fy_test, penalty='l1', loss=None, dual=False, C=1.0):
# Note: c argument not actually used yet
for i in range(0,len(fX_train)):
if not loss:
lsvm = svm.LinearSVC(penalty=penalty, dual=dual, C=C)
else:
lsvm = svm.LinearSVC(penalty=penalty, dual=dual, loss=loss)
lsvm.fit(fX_train, fy_train)
lX_train = lsvm.score(fX_train, fy_train)
lX_test = lsvm.score(fX_test, fy_test)
lX_train_predict= lsvm.predict(fX_train)
lX_test_predict= lsvm.predict(fX_test)
ly_train_labels= fy_train
ly_test_labels= fy_test
return lX_train, lX_test, lX_train_predict, lX_test_predict, ly_train_labels, ly_test_labels
'''
| gpl-3.0 |
f3r/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
sonium0/pymatgen | pymatgen/io/abinitio/works.py | 1 | 44493 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit:
"""
from __future__ import unicode_literals, division, print_function
import os
import shutil
import time
import abc
import collections
import numpy as np
import six
import copy
from six.moves import filter
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .tasks import (Task, AbinitTask, Dependency, Node, NodeResults, ScfTask, NscfTask, PhononTask, DdkTask,
BseTask, RelaxTask, DdeTask, ScrTask, SigmaTask)
from .strategies import HtcStrategy, NscfStrategy
from .utils import Directory
from .netcdf import ETSF_Reader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
]
class WorkResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
@classmethod
def from_node(cls, work):
"""Initialize an instance from a :class:`Work` instance."""
new = super(WorkResults, cls).from_node(work)
#new.update(
# #input=work.strategy
#)
# Will put all files found in outdir in GridFs
# Warning: assuming binary files.
d = {os.path.basename(f): f for f in work.outdir.list_filepaths()}
new.register_gridfs_files(**d)
return new
class WorkError(Exception):
"""Base class for the exceptions raised by Work objects."""
class BaseWork(six.with_metaclass(abc.ABCMeta, Node)):
Error = WorkError
Results = WorkResults
# interface modeled after subprocess.Popen
@abc.abstractproperty
def processes(self):
"""Return a list of objects that support the `subprocess.Popen` protocol."""
def poll(self):
"""
Check if all child processes have terminated. Set and return returncode attribute.
"""
return [task.poll() for task in self]
def wait(self):
"""
Wait for child processed to terminate. Set and return returncode attribute.
"""
return [task.wait() for task in self]
def communicate(self, input=None):
"""
Interact with processes: Send data to stdin. Read data from stdout and
stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a
string to be sent to the child processed, or None, if no data should be
sent to the children.
communicate() returns a list of tuples (stdoutdata, stderrdata).
"""
return [task.communicate(input) for task in self]
@property
def returncodes(self):
"""
The children return codes, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
return [task.returncode for task in self]
@property
def ncores_reserved(self):
"""
Returns the number of cores reserved in this moment.
A core is reserved if it's still not running but
we have submitted the task to the queue manager.
"""
return sum(task.tot_cores for task in self if task.status == task.S_SUB)
@property
def ncores_allocated(self):
"""
Returns the number of CPUs allocated in this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(task.tot_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
@property
def ncores_inuse(self):
"""
Returns the number of cores used in this moment.
A core is used if there's a job that is running on it.
"""
return sum(task.tot_cores for task in self if task.status == task.S_RUN)
def fetch_task_to_run(self):
"""
Returns the first task that is ready to run or
        None if no task can be submitted at present
Raises:
`StopIteration` if all tasks are done.
"""
# All the tasks are done so raise an exception
# that will be handled by the client code.
if all(task.is_completed for task in self):
raise StopIteration("All tasks completed.")
for task in self:
if task.can_run:
return task
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.warning("Possible deadlock in fetch_task_to_run!")
return None
def fetch_alltasks_to_run(self):
"""
Returns a list with all the tasks that can be submitted.
Empty list if not task has been found.
"""
return [task for task in self if task.can_run]
@abc.abstractmethod
def setup(self, *args, **kwargs):
"""Method called before submitting the calculations."""
def _setup(self, *args, **kwargs):
self.setup(*args, **kwargs)
def connect_signals(self):
"""
Connect the signals within the work.
The :class:`Work` is responsible for catching the important signals raised from
its task and raise new signals when some particular condition occurs.
"""
for task in self:
dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
@property
def all_ok(self):
return all(task.status == task.S_OK for task in self)
def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It executes on_all_ok when all task in self have reached `S_OK`.
"""
logger.debug("in on_ok with sender %s" % sender)
if self.all_ok:
if self.finalized:
return AttrDict(returncode=0, message="Work has been already finalized")
else:
# Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
self._finalized = True
try:
results = AttrDict(**self.on_all_ok())
except:
self.finalized = False
raise
# Signal to possible observers that the `Work` reached S_OK
logger.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
logger.info("Work %s status = %s" % (str(self), self.status))
if self._finalized:
dispatcher.send(signal=self.S_OK, sender=self)
return results
return AttrDict(returncode=1, message="Not all tasks are OK!")
def on_all_ok(self):
"""
This method is called once the `Work` is completed i.e. when all the tasks
have reached status S_OK. Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
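    # Subclasses typically override on_all_ok to post-process their outputs once
    # every task reaches S_OK; see e.g. QptdmWork.on_all_ok and PhononWork.on_all_ok
    # later in this module, which merge the partial SCR/DDB files produced by their tasks.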
def get_results(self, **kwargs):
"""
Method called once the calculations are completed.
The base version returns a dictionary task_name: TaskResults for each task in self.
"""
results = self.Results.from_node(self)
return results
class Work(BaseWork):
"""
A Work is a list of (possibly connected) tasks.
"""
def __init__(self, workdir=None, manager=None):
"""
Args:
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
super(Work, self).__init__()
self._tasks = []
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
def set_manager(self, manager):
"""Set the :class:`TaskManager` to use to launch the :class:`Task`."""
self.manager = manager.deepcopy()
for task in self:
task.set_manager(manager)
@property
def flow(self):
"""The flow containing this :class:`Work`."""
return self._flow
def set_flow(self, flow):
"""Set the flow associated to this :class:`Work`."""
if not hasattr(self, "_flow"):
self._flow = flow
else:
if self._flow != flow:
raise ValueError("self._flow != flow")
@lazy_property
def pos(self):
"""The position of self in the :class:`Flow`"""
for i, work in enumerate(self.flow):
if self == work:
return i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Directories with (input|output|temporary) data.
# The work will use these directories to connect
# itself to other works and/or to produce new data
# that will be used by its children.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
def chroot(self, new_workdir):
self.set_workdir(new_workdir, chroot=True)
for i, task in enumerate(self):
new_tdir = os.path.join(self.workdir, "t" + str(i))
task.set_workdir(new_tdir, chroot=True)
def __len__(self):
return len(self._tasks)
def __iter__(self):
return self._tasks.__iter__()
def __getitem__(self, slice):
return self._tasks[slice]
def chunks(self, chunk_size):
"""Yield successive chunks of tasks of lenght chunk_size."""
for tasks in chunks(self, chunk_size):
yield tasks
    def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.
        """
        return self.indir.path_in("in_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.outdir.path_in("out_" + ext)
@property
def processes(self):
return [task.process for task in self]
@property
def all_done(self):
"""True if all the :class:`Task` objects in the :class:`Work` are done."""
return all(task.status >= task.S_DONE for task in self)
@property
def isnc(self):
"""True if norm-conserving calculation."""
return all(task.isnc for task in self)
@property
def ispaw(self):
"""True if PAW calculation."""
return all(task.ispaw for task in self)
@property
def status_counter(self):
"""
        Returns a `Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
counter = collections.Counter()
for task in self:
counter[str(task.status)] += 1
return counter
def allocate(self, manager=None):
"""
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
"""
for i, task in enumerate(self):
if not hasattr(task, "manager"):
# Set the manager
# Use the one provided in input else the one of the work.
task.set_manager(manager) if manager is not None else task.set_manager(self.manager)
task_workdir = os.path.join(self.workdir, "t" + str(i))
if not hasattr(task, "workdir"):
task.set_workdir(task_workdir)
else:
if task.workdir != task_workdir:
raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))
def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
"""
Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.
Args:
obj: :class:`Strategy` object or :class:`AbinitInput` instance.
if Strategy object, we create a new `AbinitTask` from the input strategy and add it to the list.
deps: Dictionary specifying the dependency of this node.
None means that this obj has no dependency.
required_files: List of strings with the path of the files used by the task.
Note that the files must exist when the task is registered.
Use the standard approach based on Works, Tasks and deps
if the files will be produced in the future.
manager:
The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
the `TaskManager` specified during the creation of the :class:`Work`.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
:class:`Task` object
"""
task_workdir = None
if hasattr(self, "workdir"):
task_workdir = os.path.join(self.workdir, "t" + str(len(self)))
if isinstance(obj, Task):
task = obj
else:
# Set the class
if task_class is None:
task_class = AbinitTask
if isinstance(obj, HtcStrategy):
# Create the new task (note the factory so that we create subclasses easily).
task = task_class(obj, task_workdir, manager)
else:
task = task_class.from_input(obj, task_workdir, manager)
self._tasks.append(task)
# Handle possible dependencies.
if deps is not None:
deps = [Dependency(node, exts) for node, exts in deps.items()]
task.add_deps(deps)
        # Handle possible required files.
if required_files is not None:
task.add_required_files(required_files)
return task
# Helper functions
def register_scf_task(self, *args, **kwargs):
"""Register a Scf task."""
kwargs["task_class"] = ScfTask
return self.register(*args, **kwargs)
def register_nscf_task(self, *args, **kwargs):
"""Register a nscf task."""
kwargs["task_class"] = NscfTask
return self.register(*args, **kwargs)
def register_relax_task(self, *args, **kwargs):
"""Register a task for structural optimization."""
kwargs["task_class"] = RelaxTask
return self.register(*args, **kwargs)
def register_phonon_task(self, *args, **kwargs):
"""Register a phonon task."""
kwargs["task_class"] = PhononTask
return self.register(*args, **kwargs)
def register_ddk_task(self, *args, **kwargs):
"""Register a ddk task."""
kwargs["task_class"] = DdkTask
return self.register(*args, **kwargs)
def register_scr_task(self, *args, **kwargs):
"""Register a screening task."""
kwargs["task_class"] = ScrTask
return self.register(*args, **kwargs)
def register_sigma_task(self, *args, **kwargs):
"""Register a sigma task."""
kwargs["task_class"] = SigmaTask
return self.register(*args, **kwargs)
def register_dde_task(self, *args, **kwargs):
"""Register a Dde task."""
kwargs["task_class"] = DdeTask
return self.register(*args, **kwargs)
def register_bse_task(self, *args, **kwargs):
"""Register a nscf task."""
kwargs["task_class"] = BseTask
return self.register(*args, **kwargs)
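    # Minimal usage sketch (names are illustrative, not part of this module):
    #
    #   work = Work(workdir="flow/w0", manager=manager)
    #   scf_task = work.register_scf_task(scf_input)
    #   nscf_task = work.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
    #
    # i.e. dependencies are expressed as {node: "EXT"} mappings, exactly as done
    # by the specialized works defined later in this module.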
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the working directory."""
return os.path.join(self.workdir, filename)
def setup(self, *args, **kwargs):
"""
Method called before running the calculations.
The default implementation is empty.
"""
def build(self, *args, **kwargs):
"""Creates the top level directory."""
# Create the directories of the work.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Build dirs and files of each task.
for task in self:
task.build(*args, **kwargs)
# Connect signals within the work.
self.connect_signals()
@property
def status(self):
"""
Returns the status of the work i.e. the minimum of the status of the tasks.
"""
return self.get_all_status(only_min=True)
#def set_status(self, status):
def get_all_status(self, only_min=False):
"""
Returns a list with the status of the tasks in self.
Args:
only_min: If True, the minimum of the status is returned.
"""
if len(self) == 0:
# The work will be created in the future.
if only_min:
return self.S_INIT
else:
return [self.S_INIT]
self.check_status()
status_list = [task.status for task in self]
if only_min:
return min(status_list)
else:
return status_list
def check_status(self):
"""Check the status of the tasks."""
# Recompute the status of the tasks
for task in self:
task.check_status()
# Take into account possible dependencies. Use a list instead of generators
for task in self:
if task.status < task.S_SUB and all([status == task.S_OK for status in task.deps_status]):
task.set_status(task.S_READY)
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by `|`.
Files matching one of the regular expressions will be preserved.
                example: exclude_wildcard="*.nc|*.txt" preserves all the files
whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
path = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(path)
def rm_indatadir(self):
"""Remove all the indata directories."""
for task in self:
task.rm_indatadir()
def rm_outdatadir(self):
"""Remove all the indata directories."""
for task in self:
            task.rm_outdatadir()
def rm_tmpdatadir(self):
"""Remove all the tmpdata directories."""
for task in self:
task.rm_tmpdatadir()
def move(self, dest, isabspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
        By default, dest is located in the parent directory of self.workdir, use isabspath=True
to specify an absolute path.
"""
if not isabspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def submit_tasks(self, wait=False):
"""
Submits the task in self and wait.
TODO: change name.
"""
for task in self:
print(task)
task.start()
if wait:
for task in self: task.wait()
def start(self, *args, **kwargs):
"""
Start the work. Calls build and _setup first, then submit the tasks.
Non-blocking call unless wait is set to True
"""
wait = kwargs.pop("wait", False)
# Initial setup
self._setup(*args, **kwargs)
# Build dirs and files.
self.build(*args, **kwargs)
# Submit tasks (does not block)
self.submit_tasks(wait=wait)
def read_etotals(self, unit="Ha"):
"""
Reads the total energy from the GSR file produced by the task.
Return a numpy array with the total energies in Hartree
The array element is set to np.inf if an exception is raised while reading the GSR file.
"""
if not self.all_done:
raise self.Error("Some task is still in running/submitted state")
etotals = []
for task in self:
# Open the GSR file and read etotal (Hartree)
gsr_path = task.outdir.has_abiext("GSR")
etot = np.inf
if gsr_path:
with ETSF_Reader(gsr_path) as r:
etot = r.read_value("etotal")
etotals.append(etot)
return EnergyArray(etotals, "Ha").to(unit)
def parse_timers(self):
"""
Parse the TIMER section reported in the ABINIT output files.
Returns:
:class:`AbinitTimerParser` object
"""
filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))
parser = AbinitTimerParser()
parser.parse(filenames)
return parser
class BandStructureWork(Work):
"""Work for band structure calculations."""
def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run or :class:`SCFStrategy` object.
nscf_input: Input for the NSCF run or :class:`NSCFStrategy` object defining the band structure calculation.
dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super(BandStructureWork, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Register the NSCF run and its dependency.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Add DOS computation(s) if requested.
self.dos_tasks = []
if dos_inputs is not None:
if not isinstance(dos_inputs, (list, tuple)):
dos_inputs = [dos_inputs]
for dos_input in dos_inputs:
dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
self.dos_tasks.append(dos_task)
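    # Once all tasks have reached S_OK, the plotting helpers below (plot_ebands,
    # plot_ebands_with_edos, plot_edoses) can be used to inspect the results, e.g.
    # work.plot_ebands_with_edos(dos_pos=0), assuming at least one DOS input was registered.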
def plot_ebands(self, **kwargs):
"""
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
"""
with self.nscf_task.open_gsr() as gsr:
return gsr.ebands.plot(**kwargs)
def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.
Returns:
`matplotlib` figure.
"""
with self.nscf_task.open_gsr() as gsr:
gs_ebands = gsr.ebands
with self.dos_tasks[dos_pos].open_gsr() as gsr:
dos_ebands = gsr.ebands
edos = dos_ebands.get_edos(method=method, step=step, width=width)
return gs_ebands.plot_with_edos(edos, **kwargs)
def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained.
                None if all DOSes should be displayed. Accepts integer or list of integers.
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot` method to customize the plot.
Returns:
`matplotlib` figure.
"""
        if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos]
from abipy.electrons.ebands import ElectronDosPlotter
plotter = ElectronDosPlotter()
for i, task in enumerate(self.dos_tasks):
if dos_pos is not None and i not in dos_pos: continue
with task.open_gsr() as gsr:
edos = gsr.ebands.get_edos(method=method, step=step, width=width)
ngkpt = task.get_inpvar("ngkpt")
plotter.add_edos("ngkpt %s" % str(ngkpt), edos)
return plotter.plot(**kwargs)
class RelaxWork(Work):
"""
Work for structural relaxations. The first task relaxes the atomic position
while keeping the unit cell parameters fixed. The second task uses the final
structure to perform a structural relaxation in which both the atomic positions
and the lattice parameters are optimized.
"""
def __init__(self, ion_input, ioncell_input, workdir=None, manager=None):
"""
Args:
ion_input: Input for the relaxation of the ions (cell is fixed)
ioncell_input: Input for the relaxation of the ions and the unit cell.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super(RelaxWork, self).__init__(workdir=workdir, manager=manager)
self.ion_task = self.register_relax_task(ion_input)
# Note:
# 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
# different unit cell parameters.
#
        # 2) Restarting from DEN is not trivial because Abinit produces all these _TIM?_DEN files.
# and the syntax used to specify dependencies is not powerful enough.
#
        # For the time being, we don't use any output from ion_task except for
        # the final structure that will be transferred in on_ok.
deps = {self.ion_task: "DEN"}
deps = {self.ion_task: "WFK"}
deps = None
self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)
# Lock ioncell_task as ion_task should communicate to ioncell_task that
# the calculation is OK and pass the final structure.
self.ioncell_task.set_status(self.S_LOCKED)
self.transfer_done = False
def on_ok(self, sender):
"""
This callback is called when one task reaches status S_OK.
If sender == self.ion_task, we update the initial structure
used by self.ioncell_task and we unlock it so that the job can be submitted.
"""
logger.debug("in on_ok with sender %s" % sender)
if sender == self.ion_task and not self.transfer_done:
# Get the relaxed structure from ion_task
ion_structure = self.ion_task.read_final_structure()
# Transfer it to the ioncell task (we do it only once).
self.ioncell_task._change_structure(ion_structure)
self.transfer_done = True
# Unlock ioncell_task so that we can submit it.
self.ioncell_task.set_status(self.S_READY)
return super(RelaxWork, self).on_ok(sender)
class G0W0Work(Work):
"""
Work for G0W0 calculations.
"""
def __init__(self, scf_input, nscf_input, scr_input, sigma_inputs,
workdir=None, manager=None, spread_scr=False, nksmall=None):
"""
Args:
scf_input: Input for the SCF run or :class:`SCFStrategy` object.
nscf_input: Input for the NSCF run or :class:`NSCFStrategy` object.
scr_input: Input for the screening run or :class:`ScrStrategy` object
sigma_inputs: List of Strategies for the self-energy run.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
            spread_scr: If True, attach a screening task to every sigma task;
                if False, use a single screening task with the max ecuteps and
                nbands for all sigma tasks.
            nksmall: If not None, also add band structure and DOS calculations to the Work.
"""
super(G0W0Work, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
# register all scf_inputs but link the nscf only the last scf in the list
#MG: FIXME Why this?
if isinstance(scf_input, (list, tuple)):
for single_scf_input in scf_input:
self.scf_task = self.register_scf_task(single_scf_input)
else:
self.scf_task = self.register_scf_task(scf_input)
# Construct the input for the NSCF run.
self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Register the SCREENING run.
if not spread_scr:
self.scr_task = scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
else:
self.scr_tasks = []
if nksmall:
# if nksmall add bandstructure and dos calculations as well
            from .abiobjects import KSampling
scf_in = scf_input[-1] if isinstance(scf_input, (list, tuple)) else scf_input
logger.info('added band structure calculation')
bands_input = NscfStrategy(scf_strategy=scf_in,
ksampling=KSampling.path_from_structure(ndivsm=nksmall, structure=scf_in.structure),
nscf_nband=scf_in.electrons.nband*2, ecut=scf_in.ecut)
self.bands_task = self.register_nscf_task(bands_input, deps={self.scf_task: "DEN"})
dos_input = NscfStrategy(scf_strategy=scf_in,
ksampling=KSampling.automatic_density(kppa=nksmall**3, structure=scf_in.structure,
shifts=(0.0, 0.0, 0.0)),
nscf_nband=scf_in.electrons.nband*2, ecut=scf_in.ecut)
self.dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
# Register the SIGMA runs.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
self.sigma_tasks = []
for sigma_input in sigma_inputs:
if spread_scr:
new_scr_input = copy.deepcopy(scr_input)
new_scr_input.screening.ecuteps = sigma_input.sigma.ecuteps
new_scr_input.screening.nband = sigma_input.sigma.nband
new_scr_input.electrons.nband = sigma_input.sigma.nband
scr_task = self.register_scr_task(new_scr_input, deps={nscf_task: "WFK"})
task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
class SigmaConvWork(Work):
"""
Work for self-energy convergence studies.
"""
def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
"""
Args:
wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
sigma_inputs: List of Strategies for the self-energy run.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
"""
# Cast to node instances.
wfk_node, scr_node = Node.as_node(wfk_node), Node.as_node(scr_node)
super(SigmaConvWork, self).__init__(workdir=workdir, manager=manager)
# Register the SIGMA runs.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
for sigma_input in sigma_inputs:
self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
class BseMdfWork(Work):
"""
Work for simple BSE calculations in which the self-energy corrections
are approximated by the scissors operator and the screening is modeled
with the model dielectric function.
"""
def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run or :class:`ScfStrategy` object.
nscf_input: Input for the NSCF run or :class:`NscfStrategy` object.
bse_inputs: List of Inputs for the BSE run or :class:`BSEStrategy` object.
workdir: Working directory of the calculation.
manager: :class:`TaskManager`.
"""
super(BseMdfWork, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Construct the input for the NSCF run.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Construct the input(s) for the BSE run.
if not isinstance(bse_inputs, (list, tuple)):
bse_inputs = [bse_inputs]
for bse_input in bse_inputs:
self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})
class QptdmWork(Work):
"""
This work parallelizes the calculation of the q-points of the screening.
It also provides the callback `on_all_ok` that calls mrgscr to merge
all the partial screening files produced.
"""
def create_tasks(self, wfk_file, scr_input):
"""
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
"""
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
# Build a temporary work in the tmpdir that will use a shell manager
# to run ABINIT in order to get the list of q-points for the screening.
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
# Create the symbolic link and add the magic value
        # nqptdm = -1 to the input to get the list of q-points.
fake_task.inlink_file(wfk_file)
fake_task.strategy.add_extra_abivars({"nqptdm": -1})
fake_task.start_and_wait()
# Parse the section with the q-points
from pymatgen.io.abinitio.netcdf import NetcdfReader
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
        #print(qpoints)
#w.rmtree()
# Now we can register the task for the different q-points
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
#new_task.set_cleanup_exts()
self.allocate()
def merge_scrfiles(self, remove_scrfiles=True):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
If remove_scrfiles is True, the partial SCR files are removed after the merge.
"""
scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))
logger.debug("will call mrgscr to merge %s:\n" % str(scr_files))
assert len(scr_files) == len(self)
        # TODO: Propagate the manager to the wrappers
mrgscr = wrappers.Mrgscr(verbose=1)
mrgscr.set_mpi_runner("mpirun")
final_scr = mrgscr.merge_qpoints(scr_files, out_prefix="out", cwd=self.outdir.path)
if remove_scrfiles:
for scr_file in scr_files:
try:
os.remove(scr_file)
except IOError:
pass
return final_scr
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
"""
final_scr = self.merge_scrfiles()
return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
def build_oneshot_phononwork(scf_input, ph_inputs, workdir=None, manager=None, work_class=None):
"""
Returns a work for the computation of phonon frequencies
    ph_inputs is a list of inputs for phonon calculations in which all the independent perturbations
    are explicitly computed, i.e.
* rfdir 1 1 1
* rfatpol 1 natom
.. warning::
        This work is mainly used for simple calculations, e.g. convergence studies.
Use :class:`PhononWork` for better efficiency.
"""
work_class = OneShotPhononWork if work_class is None else work_class
work = work_class(workdir=workdir, manager=manager)
scf_task = work.register_scf_task(scf_input)
ph_inputs = [ph_inputs] if not isinstance(ph_inputs, (list, tuple)) else ph_inputs
    # cannot use PhononTask here because the Task is not able to deal with multiple phonon calculations
for phinp in ph_inputs:
ph_task = work.register(phinp, deps={scf_task: "WFK"})
return work
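# Usage sketch (the inputs are assumed to be built elsewhere):
#   work = build_oneshot_phononwork(scf_input, ph_inputs)
# After all tasks complete, work.get_results() exposes a 'phonons' entry with the
# (qpt, freqs) namedtuples parsed by OneShotPhononWork.read_phonons below.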
class OneShotPhononWork(Work):
"""
    Simple and very inefficient work for the computation of the phonon frequencies.
    It consists of a GS task and DFPT calculations for all the independent perturbations.
The main advantage is that one has direct access to the phonon frequencies that
can be computed at the end of the second task without having to call anaddb.
Use ``build_oneshot_phononwork`` to construct this work from the input files.
"""
def read_phonons(self):
"""Read phonon frequencies from the output file."""
#
# Phonon wavevector (reduced coordinates) : 0.00000 0.00000 0.00000
# Phonon energies in Hartree :
# 1.089934E-04 4.990512E-04 1.239177E-03 1.572715E-03 1.576801E-03
# 1.579326E-03
# Phonon frequencies in cm-1 :
# - 2.392128E+01 1.095291E+02 2.719679E+02 3.451711E+02 3.460677E+02
# - 3.466221E+02
BEGIN = " Phonon wavevector (reduced coordinates) :"
END = " Phonon frequencies in cm-1 :"
ph_tasks, qpts, phfreqs = self[1:], [], []
for task in ph_tasks:
with open(task.output_file.path, "r") as fh:
qpt, inside = None, 0
for line in fh:
if line.startswith(BEGIN):
qpts.append([float(s) for s in line[len(BEGIN):].split()])
inside, omegas = 1, []
elif line.startswith(END):
break
elif inside:
inside += 1
if inside > 2:
omegas.extend((float(s) for s in line.split()))
else:
raise ValueError("Cannot find %s in file %s" % (END, task.output_file.path))
phfreqs.append(omegas)
# Use namedtuple to store q-point and frequencies in meV
phonon = collections.namedtuple("phonon", "qpt freqs")
return [phonon(qpt=qpt, freqs=freqs_meV) for qpt, freqs_meV in zip(qpts, EnergyArray(phfreqs, "Ha").to("meV") )]
def get_results(self, **kwargs):
results = super(OneShotPhononWork, self).get_results()
phonons = self.read_phonons()
results.update(phonons=phonons)
return results
class PhononWork(Work):
"""
This work usually consists of nirred Phonon tasks where nirred is
the number of irreducible perturbations for a given q-point.
It provides the callback method (on_all_ok) that calls mrgddb to merge
    the partial DDB files and mrggkk to merge the GKK files.
"""
def merge_ddb_files(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Returns:
path to the output DDB file
"""
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in self]))
logger.debug("will call mrgddb to merge %s:\n" % str(ddb_files))
# assert len(ddb_files) == len(self)
#if len(ddb_files) == 1:
# Avoid the merge. Just move the DDB file to the outdir of the work
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
# TODO: propagate the taskmanager
mrgddb = wrappers.Mrgddb(verbose=1)
mrgddb.set_mpi_runner("mpirun")
mrgddb.merge(ddb_files, out_ddb=out_ddb, description=desc, cwd=self.outdir.path)
return out_ddb
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
# TODO
# Call anaddb to compute the phonon frequencies for this q-point and
# store the results in the outdir of the work.
#atask = AnaddbTask(anaddb_input, ddb_node,
# gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None)
#atask.start()
return results
| mit |
lucidfrontier45/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 4 | 2176 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD Style.
import pylab as pl
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
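# Both coefficient matrices have shape (n_tasks, n_features); the plots below
# compare their sparsity patterns and the recovered time course of one feature.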
###############################################################################
# Plot support and time series
fig = pl.figure(figsize=(8, 5))
pl.subplot(1, 2, 1)
pl.spy(coef_lasso_)
pl.xlabel('Feature')
pl.ylabel('Time (or Task)')
pl.text(10, 5, 'Lasso')
pl.subplot(1, 2, 2)
pl.spy(coef_multi_task_lasso_)
pl.xlabel('Feature')
pl.ylabel('Time (or Task)')
pl.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
pl.figure()
pl.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
pl.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
pl.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
pl.legend(loc='upper center')
pl.axis('tight')
pl.ylim([-1.1, 1.1])
pl.show()
| bsd-3-clause |
bartnijssen/RVIC | rvic/parameters.py | 1 | 22374 | """
RVIC parameter file development driver
"""
import os
import numpy as np
import pandas as pd
from collections import OrderedDict
from logging import getLogger
from core.log import init_logger, close_logger, LOG_NAME
from core.mpi import LoggingPool
from core.utilities import make_directories, copy_inputs, strip_invalid_char
from core.utilities import read_netcdf, tar_inputs, latlon2yx
from core.utilities import check_ncvars, clean_file, read_domain
from core.utilities import search_for_channel
from core.aggregate import make_agg_pairs, aggregate
from core.make_uh import rout
from core.share import NcGlobals
from core.write import write_agg_netcdf
from core.variables import Point
from core.param_file import finish_params
from core.config import read_config
try:
from core.remap import remap
remap_available = True
except:
remap_available = False
# -------------------------------------------------------------------- #
# Top level driver
def parameters(config_file, numofproc=1):
# ---------------------------------------------------------------- #
# Initilize
uh_box, fdr_data, fdr_vatts, dom_data, \
outlets, config_dict, directories = gen_uh_init(config_file)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Get main logger
log = getLogger(LOG_NAME)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Run
if numofproc > 1:
pool = LoggingPool(processes=numofproc)
for i, (cell_id, outlet) in enumerate(outlets.iteritems()):
log.info('On Outlet #{0} of {1}'.format(i+1, len(outlets)))
pool.apply_async(gen_uh_run,
args=(uh_box, fdr_data, fdr_vatts, dom_data,
outlet, config_dict, directories),
callback=store_result)
pool.close()
pool.join()
outlets = OrderedDict(sorted(results.items(), key=lambda t: t[0]))
else:
for name, outlet in outlets.iteritems():
outlet = gen_uh_run(uh_box, fdr_data, fdr_vatts, dom_data, outlet,
config_dict, directories)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Finally, make the parameter file
gen_uh_final(outlets, dom_data, config_dict, directories)
# ---------------------------------------------------------------- #
return
# -------------------------------------------------------------------- #
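# Usage sketch: the driver expects the path to an INI-style configuration file
# (the filename below is hypothetical) containing the OPTIONS, POUR_POINTS,
# UH_BOX, ROUTING and DOMAIN sections read in gen_uh_init, e.g.
#   parameters('rvic_parameters.cfg', numofproc=4)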
def gen_uh_init(config_file):
"""Initialize RVIC parameter"""
# ---------------------------------------------------------------- #
# Read Configuration files
config_dict = read_config(config_file)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Import optional modules
if config_dict['OPTIONS']['REMAP'] and not remap_available:
        raise ValueError('Problem importing remap module '
                         '(check to make sure cdo.py is available)')
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Setup Directory Structures
directories = make_directories(config_dict['OPTIONS']['CASE_DIR'],
['plots', 'logs', 'params', 'inputs'])
directories.update(make_directories(config_dict['OPTIONS']['TEMP_DIR'],
['aggregated', 'remapped']))
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# copy inputs to $case_dir/inputs and update configuration
config_dict = copy_inputs(config_file, directories['inputs'])
options = config_dict['OPTIONS']
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Start Logging
log = init_logger(directories['logs'], options['LOG_LEVEL'],
options['VERBOSE'])
for direc in directories:
log.info('%s directory is %s', direc, directories[direc])
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Read Pour Points files
try:
pour_points = pd.read_csv(config_dict['POUR_POINTS']['FILE_NAME'],
comment='#')
log.info('Opened Pour Points File: '
'{0}'.format(config_dict['POUR_POINTS']['FILE_NAME']))
if not (all(x in pour_points.keys() for x in ['lons', 'lats']) or
all(x in pour_points.keys() for x in ['x', 'y'])):
raise ValueError('Pour Points File must include '
'variables (lons, lats) or (x, y)')
if 'names' in pour_points:
pour_points.fillna(inplace=True, value='unknown')
for i, name in enumerate(pour_points.names):
pour_points.names[i] = strip_invalid_char(name)
pour_points.drop_duplicates(inplace=True)
        pour_points.dropna(inplace=True)
except Exception as e:
log.error('Error opening pour points file: '
'{0}'.format(config_dict['POUR_POINTS']['FILE_NAME']))
log.exception(e)
raise
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Read uh box file
uh_file = config_dict['UH_BOX']['FILE_NAME']
uh_header = int(config_dict['UH_BOX']['HEADER_LINES'])
uh_box = {}
try:
uh_box['time'], uh_box['func'] = np.genfromtxt(uh_file,
skip_header=uh_header,
delimiter=',',
unpack=True)
log.info('Opened UHbox File: '
'{0}'.format(config_dict['UH_BOX']['FILE_NAME']))
except:
        log.exception('Error opening uh_box file: '
                      '{0}'.format(config_dict['UH_BOX']['FILE_NAME']))
raise
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Read FDR file
fdr_file = config_dict['ROUTING']['FILE_NAME']
fdr_var = config_dict['ROUTING']['FLOW_DIRECTION_VAR']
fdr_lat = config_dict['ROUTING']['LATITUDE_VAR']
fdr_lon = config_dict['ROUTING']['LONGITUDE_VAR']
fdr_vel = config_dict['ROUTING']['VELOCITY']
fdr_dif = config_dict['ROUTING']['DIFFUSION']
try:
fdr_data, fdr_vatts, _ = read_netcdf(fdr_file)
fdr_shape = fdr_data[fdr_var].shape
# ---------------------------------------------------------------- #
# Check latitude order, flip if necessary.
if fdr_data[fdr_lat][-1] > fdr_data[fdr_lat][0]:
log.debug('Flow Direction inputs came in upside down, flipping '
'everything now.')
remove_vars = []
for var, data in fdr_data.iteritems():
log.debug('flipping %s', var)
if data.ndim >= 1 and var != fdr_lon:
fdr_data[var] = np.flipud(data)
elif data.ndim == 0:
remove_vars.append(var)
if remove_vars:
for var in remove_vars:
del fdr_data[var]
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Add velocity and/or diffusion grids if not present yet
if not type(fdr_vel) == str:
fdr_data['velocity'] = np.zeros(fdr_shape) + fdr_vel
config_dict['ROUTING']['VELOCITY'] = 'velocity'
log.info('Added velocity grid to fdr_data')
if not type(fdr_dif) == str:
fdr_data['diffusion'] = np.zeros(fdr_shape) + fdr_dif
config_dict['ROUTING']['DIFFUSION'] = 'diffusion'
log.info('Added diffusion grid to fdr_data')
if ('SOURCE_AREA_VAR' not in config_dict['ROUTING'] or
config_dict['ROUTING']['SOURCE_AREA_VAR'] not in fdr_data):
log.warning('Upstream `SOURCE_AREA` was not provided, output '
'source area will be zero.')
config_dict['ROUTING']['SOURCE_AREA_VAR'] = 'src_area'
fdr_data[config_dict['ROUTING']['SOURCE_AREA_VAR']] = \
np.zeros(fdr_shape)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
fdr_data['resolution'] = np.abs(fdr_data[fdr_lon][1] -
fdr_data[fdr_lon][0])
check_ncvars(config_dict['ROUTING'], fdr_data.keys())
# ---------------------------------------------------------------- #
log.info('Opened FDR File: {0}'.format(fdr_file))
except:
log.exception('Error opening FDR file')
raise
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Read domain file
domain = config_dict['DOMAIN']
dom_data, _, _ = read_domain(domain)
log.info('Opened Domain File: '
'{0}'.format(domain['FILE_NAME']))
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
    # If remap is False, the domain coordinates need to match the fdr coordinates;
    # we can move the unit hydrographs to the domain grid later
if options['AGGREGATE'] and not options['REMAP']:
log.error('RVIC parameter generation requires REMAP option to be True'
' if AGGREGATE is True')
raise ValueError('Invalid option')
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Group pour points (if aggregate)
if options['AGGREGATE']:
outlets = make_agg_pairs(pour_points, dom_data, fdr_data, config_dict)
log.info('Finished making agg pairs of '
'pour points and outlet grid cells')
else:
outlets = {}
if all(x in pour_points.keys() for x in ['x', 'y', 'lons', 'lats']):
lats = pour_points['lats'].values
lons = pour_points['lons'].values
routys = pour_points['y'].values
routxs = pour_points['x'].values
elif all(x in pour_points.keys() for x in ['x', 'y']):
# use x and y (assume from routing inputs grid)
# find lons and lats from xs and ys
routys = pour_points['y'].values
routxs = pour_points['x'].values
lats = fdr_data[fdr_lat][routys]
lons = fdr_data[fdr_lon][routxs]
else:
# use lons and lats to find xs and ys
lats = pour_points['lats'].values
lons = pour_points['lons'].values
# find x and y on routing grid
routys, routxs = latlon2yx(plats=lats,
plons=lons,
glats=fdr_data[fdr_lat],
glons=fdr_data[fdr_lon])
if options['SEARCH_FOR_CHANNEL']:
routys, routxs = search_for_channel(
fdr_data[config_dict['ROUTING']['SOURCE_AREA_VAR']],
routys, routxs, tol=10, search=2)
# update lats and lons
lats = fdr_data[fdr_lat][routys]
lons = fdr_data[fdr_lon][routxs]
# Find location on domain grid
domys, domxs = latlon2yx(plats=lats,
plons=lons,
glats=dom_data[domain['LATITUDE_VAR']],
glons=dom_data[domain['LONGITUDE_VAR']])
for i in xrange(len(lats)):
if 'names' in pour_points.keys():
name = pour_points['names'].values[i]
name = name.replace("'", '').replace(" ", "_")
else:
                # fill the name field with p-<outlet number>
name = 'p-{0}'.format(i)
outlets[i] = Point(lat=lats[i],
lon=lons[i],
domx=domxs[i],
domy=domys[i],
routx=routxs[i],
routy=routys[i],
name=name,
cell_id=dom_data['cell_ids'][domys[i],
domxs[i]])
outlets[i].pour_points = [outlets[i]]
# ---------------------------------------------------------------- #
log.debug(outlets)
log.info('Finished with gen_uh_init')
log.info('-------------------------------------------------------------\n')
return (uh_box, fdr_data, fdr_vatts, dom_data, outlets,
config_dict, directories)
# -------------------------------------------------------------------- #
def gen_uh_run(uh_box, fdr_data, fdr_vatts, dom_data, outlet, config_dict,
directories):
"""
    Route the unit hydrographs for a single outlet and map them onto the
    domain grid.
"""
log = getLogger(LOG_NAME)
log.info('Running outlet cell id {0}'.format(outlet.cell_id))
agg_data = {}
domain = config_dict['DOMAIN']
dom_lat = domain['LATITUDE_VAR']
dom_lon = domain['LONGITUDE_VAR']
dom_mask = domain['LAND_MASK_VAR']
options = config_dict['OPTIONS']
# ------------------------------------------------------------ #
# netCDF variable options
ncvaropts = {}
if 'NETCDF_ZLIB' in options:
ncvaropts['zlib'] = options['NETCDF_ZLIB']
if 'NETCDF_COMPLEVEL' in options:
ncvaropts['complevel'] = options['NETCDF_COMPLEVEL']
if 'NETCDF_SIGFIGS' in options:
ncvaropts['least_significant_digit'] = options['NETCDF_SIGFIGS']
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Loop over pour points
for j, pour_point in enumerate(outlet.pour_points):
log.info('On pour_point #{0} of'
' {1}'.format(j+1, len(outlet.pour_points)))
# -------------------------------------------------------- #
# Make the Unit Hydrograph Grid
rout_data = rout(pour_point, uh_box, fdr_data, fdr_vatts,
config_dict['ROUTING'])
log.debug('Done routing to pour_point')
log.debug('rout_data: {0}, '
'{1}'.format(rout_data['unit_hydrograph'].min(),
rout_data['unit_hydrograph'].max()))
log.debug('rout_data sum: {0}, '
'{1}'.format(rout_data['unit_hydrograph'].sum(),
rout_data['fraction'].sum()))
# -------------------------------------------------------- #
# -------------------------------------------------------- #
# aggregate
if options['AGGREGATE']:
if j != len(outlet.pour_points)-1:
agg_data = aggregate(rout_data, agg_data,
res=fdr_data['resolution'])
else:
agg_data = aggregate(rout_data, agg_data,
res=fdr_data['resolution'],
pad=options['AGG_PAD'],
maskandnorm=True)
log.debug('agg_data: {0}, '
'{1}'.format(agg_data['unit_hydrograph'].min(),
agg_data['unit_hydrograph'].max()))
else:
agg_data = rout_data
# -------------------------------------------------------- #
# ------------------------------------------------------------ #
# write temporary file #1
if options['REMAP']:
glob_atts = NcGlobals(title='RVIC Unit Hydrograph Grid File',
RvicPourPointsFile=os.path.split(config_dict['POUR_POINTS']['FILE_NAME'])[1],
RvicUHFile=os.path.split(config_dict['UH_BOX']['FILE_NAME'])[1],
RvicFdrFile=os.path.split(config_dict['ROUTING']['FILE_NAME'])[1],
RvicDomainFile=os.path.split(domain['FILE_NAME'])[1])
temp_file_1 = os.path.join(directories['aggregated'],
'aggUH_{0}.nc'.format(outlet.name.replace(" ", "_")))
write_agg_netcdf(temp_file_1, agg_data, glob_atts,
options['NETCDF_FORMAT'], **ncvaropts)
# -------------------------------------------------------- #
# Remap temporary file #1 to temporary file #2
temp_file_2 = os.path.join(directories['remapped'],
'remapUH_{0}.nc'.format(outlet.name.replace(" ", "_")))
remap(domain['FILE_NAME'], temp_file_1, temp_file_2)
# -------------------------------------------------------- #
# Read temporary file #2
final_data, _, _ = read_netcdf(temp_file_2,
variables=['unit_hydrograph',
'fraction',
dom_lat])
# -------------------------------------------------------- #
# Check latitude order, flip if necessary.
if final_data[dom_lat].ndim == 1:
if final_data[dom_lat][-1] > final_data[dom_lat][0]:
var_list = final_data.keys()
log.debug('Remapped inputs came in upside down, flipping {0}'
' now.'.format(", ".join(var_list)))
                # flip latitude and fraction along the y axis (axis 0)
final_data[dom_lat] = final_data[dom_lat][::-1]
final_data['fraction'] = final_data['fraction'][::-1, :]
# flip unit hydrograph along y axis (axis 1)
final_data['unit_hydrograph'] = final_data['unit_hydrograph'][:, ::-1, :]
assert dom_data['cord_lats'][0] == final_data[dom_lat][0]
# -------------------------------------------------------- #
# -------------------------------------------------------- #
# Clean temporary file #2 (if applicable)
if config_dict['OPTIONS']['CLEAN']:
clean_file(temp_file_1)
clean_file(temp_file_2)
# -------------------------------------------------------- #
# -------------------------------------------------------- #
        # Set the domain index offset to zero and use the remapped fraction
# as the final fractions
y0, x0 = 0, 0
final_fracs = final_data['fraction']
# -------------------------------------------------------- #
else:
# -------------------------------------------------------- #
# Put the agg data back onto the original grid
final_data = agg_data
final_fracs = np.zeros_like(fdr_data['velocity'],
dtype=np.float64)
x0 = final_data['dom_x_min']
x1 = final_data['dom_x_max']
y0 = final_data['dom_y_min']
y1 = final_data['dom_y_max']
final_fracs[y0:y1, x0:x1] = final_data['fraction']
# -------------------------------------------------------- #
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Add to "adjust fractions structure"
y, x = np.nonzero((final_fracs > 0.0) * (dom_data[dom_mask] == 1))
yi = y - y0
xi = x - x0
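    # y and x index the full domain grid; subtracting the offsets (y0, x0)
    # maps them into the final_data arrays, which are either the full remapped
    # grid (offsets are zero) or the cropped aggregation window.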
# From final data
outlet.time = np.arange(final_data['unit_hydrograph'].shape[0])
outlet.fractions = final_data['fraction'][yi, xi]
outlet.unit_hydrograph = final_data['unit_hydrograph'][:, yi, xi]
# From domain data
outlet.lon_source = dom_data[dom_lon][y, x]
outlet.lat_source = dom_data[dom_lat][y, x]
outlet.cell_id_source = dom_data['cell_ids'][y, x]
outlet.x_source = x
outlet.y_source = y
# ---------------------------------------------------------------- #
return outlet
# -------------------------------------------------------------------- #
def gen_uh_final(outlets, dom_data, config_dict, directories):
"""
Make the RVIC Parameter File
"""
log = getLogger(LOG_NAME)
log.info('In gen_uh_final')
# ---------------------------------------------------------------- #
# Write the parameter file
param_file, today = finish_params(outlets, dom_data, config_dict,
directories)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# tar the inputs directory / log file
inputs_tar = tar_inputs(directories['inputs'], suffix=today)
log_tar = tar_inputs(log.filename)
log.info('Done with RvicGenParam.')
log.info('Location of Inputs: %s', inputs_tar)
log.info('Location of Log: %s', log_tar)
    log.info('Location of Parameter File: %s', param_file)
close_logger()
# ---------------------------------------------------------------- #
return
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# store_result helper function
def store_result(result):
# This is called whenever foo_pool(i) returns a result.
# result_list is modified only by the main process, not the pool workers.
results[result.cell_id] = result
# -------------------------------------------------------------------- #
results = {}
| gpl-3.0 |
Lynn-015/NJU_DMRG | giggleliu/mps/mpo.py | 1 | 1492 | #!/usr/bin/python
'''
Matrix Product Operator.
'''
from numpy import *
from matplotlib.pyplot import *
from matplotlib import patches
from matplotlib.collections import LineCollection
from scipy.linalg import svd,qr,rq
from scipy import sparse as sps
from utils import bcast_dot
import pdb,time
class OpString(object):
'''
Operator String.
'''
def __init__(self,nsite):
self.__opdict__={}
self.nsite=nsite
def __getitem__(self,l):
return self.__opdict__.get(l)
def __setitem__(self,l,op):
self.__opdict__[l]=op
def __iter__(self):
for i in xrange(self.nsite):
            yield self.__opdict__.get(i)
@property
def oplist(self):
'''A list of operators defined on sites.'''
opl=[None]*self.nsite
for l in self.__opdict__:
            opl[l]=self.__opdict__[l]
        return opl
@property
def siteindices(self):
'''The site indices with valid data.'''
return self.__opdict__.keys()
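# Illustrative usage (sz below stands in for any single-site operator matrix):
#     ops = OpString(nsite=4)
#     ops[1] = sz            # place an operator on site 1
#     ops.siteindices        # -> [1]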
class MPO(object):
'''
Matrix product operator.
WL:
The Matrix product operator datas.
'''
def __init__(self,WL):
self.WL=WL
def __str__(self):
return self.WL.__str__()
def serialize(self):
        '''
        Return the serialized form of the operator.
        '''
        O=self.WL[0]
for w in self.WL[1:]:
O=O.dot(w)
return O[0,0]
@property
def nsite(self):
'''Number of sites.'''
return len(self.WL)
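# Note: serialize() assumes the leading bond dimension of WL[0] and the trailing
# bond dimension of WL[-1] are both 1, so the contracted chain reduces to the
# single [0, 0] block holding the full operator.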
| mit |
marcocaccin/MarcoGP | predict_alpha.py | 1 | 5693 | #!/usr/bin/env python
import os, pickle, time
try:
os.remove('my_gp_module.pyc')
except OSError:
pass
import scipy as sp
from scipy.linalg import eigh
from my_gp_module import GaussianProcess
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def randint_norepeat(low, exclude=None, high=None, size=None):
    # drop duplicates from the initial draw; the top-up loop below restores `size`
    l = list(set(sp.random.randint(low, high=high, size=size)))
    if exclude is None:
        exclude = []
    else:
        # remove elements already present in exclude
        l = [x for x in l if x not in exclude]
for i in range(size-len(l)):
while True:
new = sp.random.randint(low, high=high)
if new not in exclude and new not in l:
l.append(new)
break
l.sort()
return l
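# Example (illustrative): randint_norepeat(0, exclude=[1, 2], high=10, size=3)
# returns a sorted list of 3 distinct integers drawn from range(10) that avoids
# the indices 1 and 2.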
def teach_database_plusone(GP, X, y, X_t, y_t):
# Force all data to be numpy arrays
X, y = sp.asarray(X), sp.asarray(y)
X_t, y_t = sp.asarray(X_t), sp.asarray(y_t)
# From a fixed database (X,y), get alpha of some new configurations if added one at a time
alphas = []
for i, (X_test, y_test) in enumerate(zip(X_t, y_t)):
if y_test.size != 1:
print "ERROR: output space must be 1D. Exiting..."
return
# Test configuration is placed at position 0
X_plus = sp.row_stack((X_test, X))
y_plus = sp.append(y_test, y)
ttt = time.clock()
GP.fit(X_plus, y_plus)
print "TIMER teach", time.clock() - ttt
        alphas.append((GP.alpha[0]).flatten().copy())
GP.flush_data()
return sp.array(alphas).flatten()
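# Each entry of the returned array is the regression weight (alpha) that the
# corresponding test configuration receives when it is prepended to the fixed
# database (X, y), i.e. one alpha per test point.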
# --------------------------------------------
# WHAT IT DOES:
# Latest idea by Anatole
# Ntest test configurations, Ndatabases databases. Teach db+1 and check if inverse*inverse*k works
# --------------------------------------------
# --------------------------------------------
# Parameters for the run
# --------------------------------------------
split = 1
N_models = 1
theta0 = 1.0e1
nugget = 1.0e-15
normalise = 1
metric = 'cityblock'
Ntest = 20
Nteach = 500
Ndatabases = 21
target_property = 'T'
database_file = 'qm7.pkl'
# --------------------------------------------
# Load all database
# --------------------------------------------
ttt = time.clock()
if not os.path.exists(database_file): os.system('wget http://www.quantum-machine.org/data/qm7.pkl')
dataset = pickle.load(open(database_file,'r'))
print "TIMER load_data", time.clock() - ttt
test_indices_rec = []
teach_indices_rec = []
alpha_predicted = []
alpha_target = []
energy_target = []
energy_error = []
# --------------------------------------------
# Setup a Gaussian Process once and for all so that parameters do not change
# --------------------------------------------
gp = GaussianProcess(corr='absolute_exponential', theta0=sp.asarray([theta0]),
nugget=nugget, verbose=True, normalise=normalise, do_features_projection=False, low_memory=False, metric=metric)
# --------------------------------------------
# Loop over different training sets of the same size
# --------------------------------------------
for iteration in range(Ndatabases):
# --------------------------------------------
# Pick Ntest configurations randomly
# --------------------------------------------
test_indices = list(sp.random.randint(0, high=dataset[target_property].size, size=Ntest))
db_indices = randint_norepeat(0, exclude=test_indices, high=dataset[target_property].size, size=Nteach)
teach_indices_rec.append(db_indices)
X = dataset['X'][test_indices + db_indices]
T = dataset[target_property][test_indices + db_indices]
print "\n", "-"*60, "\n"
print "db size = %d, iteration %03d" % (Nteach, iteration)
# --------------------------------------------
# Extract feature(s) from training data and test set:
# only sorted eigenvalues of Coulomb matrix in this case
# --------------------------------------------
ttt = time.clock()
eigX = [(eigh(M, eigvals_only=True))[::-1] for M in X]
print "TIMER eval_features", time.clock() - ttt
eigX_t = eigX[:Ntest]
eigX_db = eigX[Ntest:]
y = T.ravel()
y_t = y[:Ntest]
y_db = y[Ntest:]
# --------------------------------------------
# Do len(y_t) teachings by including db + 1 configurations
# --------------------------------------------
alphas = teach_database_plusone(gp, eigX_db, y_db, eigX_t, y_t)
alpha_target.append(alphas)
# --------------------------------------------
# --------------------------------------------
# Second time don't include the test set and predict
# --------------------------------------------
ttt = time.clock()
gp.flush_data()
# Fit to data
gp.fit(eigX_db, y_db)
print "TIMER teach", time.clock() - ttt
beta = sp.dot(gp.inverse, gp.alpha)
y_pred, k = gp.predict(eigX_t, return_k=True)
# --------------------------------------------
# predict the alphas the K-1 * K-1 * k way
# --------------------------------------------
alpha_predicted.append(sp.dot(k, beta.flatten()))
energy_target.append(y_t)
energy_error.append(y_pred - y_t)
# check whether the ML itself is doing sensible things
print "ERROR = ", energy_error[-1]
print "ALPHA TRUE vs. PREDICTED:", alphas, alpha_predicted[-1]
with open('alpha_predictions.txt', 'a') as f:
f.write("n_test_molecules=%d n_databases=%d db_size=%d\n" % (Ntest, Ndatabases, Nteach))
output_data = sp.vstack((sp.array(alpha_target).flatten(), sp.array(alpha_predicted).flatten(), sp.array(energy_target).flatten(), sp.array(energy_error).flatten()))
sp.savetxt(f, output_data.T)
f.close()
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/io/tests/test_packers.py | 9 | 21638 | import nose
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, SparseSeries, SparseDataFrame,
SparsePanel)
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean, assert_index_equal
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas.sparse.tests.test_sparse import assert_sp_series_equal, assert_sp_frame_equal
from pandas import Timestamp, tslib
nan = np.nan
from pandas.io.packers import to_msgpack, read_msgpack
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
else:
assert(a == b)
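# check_arbitrary dispatches to the pandas assert_* helper matching the object
# type and recurses element-wise into lists and tuples.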
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10,2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result,df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result,df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result,df)
s = to_msgpack(None,df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p,'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [ DataFrame(np.random.randn(10,2)) for i in range(5) ]
s = to_msgpack(None,*dfs)
for i, result in enumerate(read_msgpack(s,iterator=True)):
tm.assert_frame_equal(result,dfs[i])
def test_invalid_arg(self):
#GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32','float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(
2013, 1, 1), datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1), np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ('foo', 'two'),
('qux', 'one'), ('qux', 'two')], names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'),Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'),Timestamp('20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
# this currently fails
self.assertRaises(UnicodeEncodeError, self.encode_decode, i)
#i_rec = self.encode_decode(i)
#self.assertTrue(i.equals(i_rec))
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(dict([(k, data[k]) for k in ['A', 'B', 'C', 'D']]))}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'], ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple(
[self.frame['float'], self.frame['float'].A, self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1]*100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
        # currently these are not implemented
#i_rec = self.encode_decode(obj)
#comparator(obj, i_rec, **kwargs)
self.assertRaises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pydata/pandas/pull/9783
"""
def setUp(self):
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def test_compression_zlib(self):
i_rec = self.encode_decode(self.frame, compress='zlib')
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def test_compression_blosc(self):
try:
import blosc
except ImportError:
raise nose.SkipTest('no blosc')
i_rec = self.encode_decode(self.frame, compress='blosc')
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
class TestEncoding(TestPackers):
def setUp(self):
super(TestEncoding, self).setUp()
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
class TestMsgpack():
"""
How to add msgpack tests:
    1. Install the pandas version intended to output the msgpack.
    2. Execute "generate_legacy_storage_files.py" to create the msgpack.
    $ python generate_legacy_storage_files.py <output_dir> msgpack
    3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
    NOTE: TestMsgpack can't be a subclass of tm.TestCase because it uses test generators.
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_msgpack_data, create_data)
self.data = create_msgpack_data()
self.all_data = create_data()
self.path = u('__%s__.msgpack' % tm.rands(10))
self.minimum_structure = {'series': ['float', 'int', 'mixed', 'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
assert kind in data[typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
def compare(self, vf, version):
data = read_msgpack(vf)
self.check_min_structure(data)
for typ, dv in data.items():
assert typ in self.all_data, 'unpacked data contains extra key "{0}"'.format(typ)
for dt, result in dv.items():
assert dt in self.all_data[typ], 'data["{0}"] contains extra key "{1}"'.format(typ, dt)
try:
expected = self.data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comparator = getattr(self,"compare_{typ}_{dt}".format(typ=typ,dt=dt), None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def read_msgpacks(self, version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
self.compare(vf, version)
n += 1
assert n > 0, 'Msgpack files are not tested'
def test_msgpack(self):
msgpack_path = tm.get_data_path('legacy_msgpack')
n = 0
for v in os.listdir(msgpack_path):
pth = os.path.join(msgpack_path, v)
if os.path.isdir(pth):
yield self.read_msgpacks, v
n += 1
assert n > 0, 'Msgpack files are not tested'
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| artistic-2.0 |
sfairhur/pycbc | examples/distributions/spin_spatial_distr_example.py | 14 | 1973 | import numpy
import matplotlib.pyplot as plt
import pycbc.coordinates as co
from mpl_toolkits.mplot3d import Axes3D
from pycbc import distributions
# We can choose any bounds between 0 and pi for this distribution but in units
# of pi so we use between 0 and 1.
theta_low = 0.
theta_high = 1.
# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi.
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py
# Here we are using the Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then
# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can take a random variable sample from that distribution.
# In this case we want 50000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000)
# Make a spin 1 magnitude since solid angle is only 2 dimensions and we need a
# 3rd dimension for a 3D plot that we make later on.
spin_mag = numpy.ndarray(shape=(10000), dtype=float)
for i in range(0,10000):
spin_mag[i] = 1.
# Use pycbc.coordinates as co. Use spherical_to_cartesian function to
# convert from spherical polar coordinates to cartesian coordinates.
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Plot the spherical distribution of spins to make sure that the spins are
# distributed uniformly across the surface of a sphere.
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spinx, spiny, spinz, s=1)
ax.set_xlabel('Spin X Axis')
ax.set_ylabel('Spin Y Axis')
ax.set_zlabel('Spin Z Axis')
plt.show()
| gpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/core/missing.py | 7 | 21165 | """
Routines for filling missing data
"""
import numpy as np
from distutils.version import LooseVersion
import pandas.algos as algos
import pandas.lib as lib
from pandas.compat import range, string_types
from pandas.types.common import (is_numeric_v_string_like,
is_float_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_integer_dtype,
_ensure_float64, is_scalar)
from pandas.types.missing import isnull
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
if not isinstance(values_to_mask, (list, np.ndarray)):
values_to_mask = [values_to_mask]
try:
values_to_mask = np.array(values_to_mask, dtype=arr.dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isnull(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isnull(arr)
else:
mask |= isnull(arr)
return mask
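# Example: mask_missing(np.array([1., 2., np.nan]), [2., np.nan]) returns
# array([False, True, True]) -- entries equal to any member of values_to_mask,
# including NaN, are flagged.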
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, 'asfreq']:
return None
if isinstance(method, string_types):
method = method.lower()
if method == 'ffill':
method = 'pad'
elif method == 'bfill':
method = 'backfill'
valid_methods = ['pad', 'backfill']
expecting = 'pad (ffill) or backfill (bfill)'
if allow_nearest:
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting %s. Got %s' %
(expecting, method))
raise ValueError(msg)
return method
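# Example: clean_fill_method('ffill') -> 'pad', clean_fill_method('bfill') ->
# 'backfill'; 'nearest' is only accepted when allow_nearest=True, otherwise a
# ValueError is raised.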
def clean_interp_method(method, **kwargs):
order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh',
'piecewise_polynomial', 'pchip', 'akima', 'spline',
'from_derivatives']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {0}."
"Got '{1}' instead.".format(valid, method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
    take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isnull(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which cant be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
def _interp_limit(invalid, fw_limit, bw_limit):
"Get idx of values that won't be filled b/c they exceed the limits."
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
raise ValueError('Invalid limit_direction: expecting one of %r, got '
'%r.' % (valid_limit_directions, limit_direction))
from pandas import Series
ys = Series(yvalues)
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
# This is a list of the indexes in the series whose yvalue is currently
# NaN, but whose interpolated yvalue will be overwritten with NaN after
# computing the interpolation. For each index in this list, one of these
# conditions is true of the corresponding NaN in the yvalues:
#
# a) It is one of a chain of NaNs at the beginning of the series, and
# either limit is not specified or limit_direction is 'forward'.
# b) It is one of a chain of NaNs at the end of the series, and limit is
# specified and limit_direction is 'backward' or 'both'.
# c) Limit is nonzero and it is further than limit from the nearest non-NaN
# value (with respect to the limit_direction setting).
#
# The default behavior is to fill forward with no limit, ignoring NaNs at
# the beginning (see issues #9218 and #10420)
violate_limit = sorted(start_nans)
if limit:
if limit_direction == 'forward':
violate_limit = sorted(start_nans | set(_interp_limit(invalid,
limit, 0)))
if limit_direction == 'backward':
violate_limit = sorted(end_nans | set(_interp_limit(invalid, 0,
limit)))
if limit_direction == 'both':
violate_limit = sorted(_interp_limit(invalid, limit, limit))
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order, **kwargs)
result[violate_limit] = np.nan
return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method
"""
try:
from scipy import interpolate
# TODO: Why is DatetimeIndex being imported here?
from pandas import DatetimeIndex # noqa
except ImportError:
raise ImportError('{0} interpolation requires Scipy'.format(method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'from_derivatives': _from_derivatives,
'piecewise_polynomial': _from_derivatives,
}
if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype('i8'), new_x.astype('i8')
if method == 'pchip':
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError("Your version of Scipy does not support "
"PCHIP interpolation.")
elif method == 'akima':
try:
from scipy.interpolate import Akima1DInterpolator # noqa
alt_methods['akima'] = _akima_interpolate
except ImportError:
raise ImportError("Your version of Scipy does not support "
"Akima interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
# GH #10633
if not order:
raise ValueError("order needs to be specified and greater than 0")
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
import scipy
from scipy import interpolate
if LooseVersion(scipy.__version__) < '0.18.0':
try:
method = interpolate.piecewise_polynomial_interpolate
return method(xi, yi.reshape(-1, 1), x,
orders=order, der=der)
except AttributeError:
pass
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
from scipy import interpolate
try:
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
except TypeError:
# Scipy earlier than 0.17.0 missing axis
P = interpolate.Akima1DInterpolator(xi, yi)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1, ) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
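# Example: interpolate_2d(np.array([1., np.nan, np.nan, 4.]), method='pad',
# limit=1) forward-fills a single NaN per gap, giving [1., 1., nan, 4.].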
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64, np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def fill_zeros(result, x, y, name, fill):
"""
    If this is a reversed op, flip x and y. If y is an integer scalar or
    array containing zeros, overwrite the corresponding entries of the
    result with `fill` (positions where the result is NaN are masked out),
    then return the result.
"""
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
# GH 7325, mask and nans must be broadcastable (also: PR 9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
signs = np.sign(y if name.startswith(('r', '__r')) else x)
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name: # (PR 9308)
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result
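# Example: for integer division where the divisor is zero and fill=np.inf, the
# affected entries of `result` are replaced with signed infinity, and 0 // 0
# under a floordiv op is replaced with NaN.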
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/svm/tests/test_sparse.py | 2 | 7677 | import numpy as np
from scipy import sparse
from sklearn import datasets, svm, linear_model, base
from numpy.testing import assert_array_almost_equal, \
assert_array_equal, assert_equal
from nose.tools import assert_raises, assert_true
from sklearn.datasets.samples_generator import make_classification
from sklearn.svm.tests import test_svm
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
clf = svm.SVC(kernel='linear').fit(X, Y)
sp_clf = svm.SVC(kernel='linear').fit(X_sp, Y)
assert_array_equal(sp_clf.predict(T), true_result)
assert_true(sparse.issparse(sp_clf.support_vectors_))
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_true(sparse.issparse(sp_clf.dual_coef_))
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_true(sparse.issparse(sp_clf.coef_))
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
# refit with a different dataset
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
def test_svc_iris():
"""Test the sparse SVC with the iris dataset"""
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.todense(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
def test_error():
"""
    Test that it raises the proper exceptions on malformed input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
"""
Similar to test_SVC
"""
clf = svm.LinearSVC().fit(X, Y)
sp_clf = svm.LinearSVC().fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
def test_linearsvc_iris():
"""Test the sparse LinearSVC with the iris dataset"""
sp_clf = svm.LinearSVC().fit(iris.data, iris.target)
clf = svm.LinearSVC().fit(iris.data.todense(), iris.target)
assert_array_almost_equal(clf.label_, sp_clf.label_)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.todense()))
def test_weight():
"""
Test class weights
"""
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
"""
Test weights on individual samples
"""
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
"""
Test that sparse liblinear honours intercept_scaling param
"""
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
"""
Test on a subset from the 20newsgroups dataset.
    This catches some bugs if input is not correctly converted into
sparse format or weights are not correctly initialized.
"""
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.todense(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.todense())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
def test_sparse_svc_clone_with_callable_kernel():
# first, test that we raise a value error for "sparse kernels"
# this test is only relevant for the deprecated sparse.SVC class.
sp = svm.sparse.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True)
assert_raises(ValueError, sp.fit, X_sp, Y)
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
if __name__ == '__main__':
import nose
nose.runmodule()
| agpl-3.0 |
PrashntS/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
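
# Minimal sketch of the estimator API used throughout this example (the data
# and bandwidth below are arbitrary): KernelDensity.fit() estimates the
# density and score_samples() returns the log-density, so np.exp() recovers
# the density itself.
_X_sketch = np.random.RandomState(0).normal(size=(100, 1))
_kde_sketch = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(_X_sketch)
_dens_sketch = np.exp(_kde_sketch.score_samples(np.linspace(-3, 3, 50)[:, np.newaxis]))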
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
RangerKD/RFS-SLAM | scripts/componentTesting/spatialIndexTreeTestVisualizer.py | 1 | 1061 | #!/usr/bin/python
import sys
import os.path
import matplotlib
import matplotlib.pyplot as plt
import argparse
import numpy as np
parser = argparse.ArgumentParser(description="Map Octree Visualizer")
parser.add_argument("dataFile", help="path to tree data")
args = parser.parse_args()
if os.path.exists(args.dataFile):
print('Opening ' + args.dataFile)
else:
    print(args.dataFile + ' does not exist')
    sys.exit(1)
treeData = np.genfromtxt(args.dataFile)
pt = np.empty([0,2])
box = np.empty([0,5])
for i in range(0, treeData.shape[0]):
if( treeData[i][1] == treeData[i][3] and treeData[i][2] == treeData[i][4] ): # point
pt = np.vstack( [pt, treeData[i,1:3]] )
else: #box
box = np.vstack( [box, treeData[i,1:6]] )
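# (Row layout assumed from the indexing above: column 0 is a node id/label,
#  columns 1-2 hold the min corner and columns 3-4 the max corner of a node's
#  bounding box; a node whose min and max corners coincide is stored as a point.)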
print pt
print box
fig = plt.figure()
ax1 = fig.add_subplot(111, aspect='equal')
for i in range(0, box.shape[0]):
    # box[i, 0:2] is the (x_min, y_min) corner; width and height come from the max corner
    ax1.add_patch( matplotlib.patches.Rectangle( box[i,0:2], box[i,2]-box[i,0], box[i,3]-box[i,1], fill=False ) )
plt.plot(pt[:,0], pt[:,1], marker='+', color='r', linewidth=0)
plt.show()
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
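
# Minimal usage sketch (illustrative only, guarded so nothing runs on import):
# most estimators exposed here follow the fit/transform API, e.g. projecting
# data onto its top principal components.
if __name__ == "__main__":
    import numpy as np

    _X = np.random.RandomState(0).rand(20, 5)
    _X_reduced = PCA(n_components=2).fit_transform(_X)   # shape: (20, 2)
    print(_X_reduced.shape)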
| bsd-3-clause |
murawaki/lexwave | pca_tree.py | 1 | 4465 | # -*- coding: utf-8 -*-
import sys
from sklearn.decomposition import PCA
import numpy as np
from parse_tree import Node
def extract_mat(root, k):
    # extract the annotation vector of every node (internal nodes and leaves)
mat_orig = {}
stack = [root]
size = -1
while len(stack) > 0:
node = stack.pop(0)
vect = map(lambda x: int(x), node.annotation["&" + k][2:-1])
size = len(vect)
mat_orig[node._id] = vect
if hasattr(node, "left"):
stack.append(node.left)
stack.append(node.right)
mat = np.empty((len(mat_orig), size), dtype=np.int32)
for _id, vect in mat_orig.iteritems():
mat[_id] = vect
return mat
def extract_mat_leaves(root, k):
    # only leaves are extracted
mat_orig = {}
id2idx = {}
idx2node = {}
stack = [root]
size = -1
while len(stack) > 0:
node = stack.pop(0)
if hasattr(node, "left"):
stack.append(node.left)
stack.append(node.right)
else:
vect = map(lambda x: int(x), node.annotation["&" + k][2:-1])
size = len(vect)
idx = id2idx[node._id] = len(id2idx)
idx2node[idx] = node
mat_orig[idx] = vect
mat = np.empty((len(mat_orig), size), dtype=np.int32)
for idx, vect in mat_orig.iteritems():
mat[idx] = vect
return mat, id2idx, idx2node
def do_pca(X):
    pca = PCA()
    # (PCA._fit is a private scikit-learn helper; it returns the SVD factors)
U, S, V = pca._fit(X)
X_transformed = np.dot(X - pca.mean_, pca.components_.T)
return pca, X_transformed
def do_pca_new(pca, X):
return np.dot(X - pca.mean_, pca.components_.T)
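
def _check_projection_sketch(X):
    # Illustrative sanity check (not called by main): for a plain PCA
    # (whiten=False), the manual projection used in do_pca_new() is exactly
    # what sklearn's PCA.transform() computes.
    pca = PCA().fit(X)
    manual = np.dot(X - pca.mean_, pca.components_.T)
    assert np.allclose(manual, pca.transform(X))
    return manual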
def plot_rec(node, X_transformed, plt, p1, p2):
_id = node._id
if hasattr(node, "parent"): # non-root
_id2 = node.parent._id
x1, x2 = X_transformed[_id2, p1], X_transformed[_id, p1]
y1, y2 = X_transformed[_id2, p2], X_transformed[_id, p2]
if min(abs(x1 - x2), abs(y1 - y2)) > 0.1:
length_includes_head=True
else:
length_includes_head=False
plt.arrow(x1, y1, x2 - x1, y2 - y1, fc="k", ec="k",
length_includes_head=length_includes_head)
# )
# head_width=0.05, head_length=0.1 )
# plt.annotate("", xy=(x2, y2), xytext=(0, 0),
# arrowprops=dict(arrowstyle="->"))
if hasattr(node, "left"):
plot_rec(node.left, X_transformed, plt, p1, p2)
plot_rec(node.right, X_transformed, plt, p1, p2)
if hasattr(node, "left"):
# internal
plt.scatter(X_transformed[_id, p1], X_transformed[_id, p2], c="r", s=30)
if not hasattr(node, "parent"): # root
plt.annotate("ROOT", (X_transformed[_id, p1], X_transformed[_id, p2]))
else:
# leaf
x, y = X_transformed[_id, p1], X_transformed[_id, p2]
plt.scatter(x, y, c="g", s=120)
plt.annotate(node.name, (x, y),
xytext=(x + 0.10, y + 0.05))
def main():
# usage: input key [output]
# key: japanese, Ainu_UCLD_GRRW_SDollo, Koreanic_CovUCLD
import cPickle as pickle
root = pickle.load(open(sys.argv[1]))
use_internal = False
X = extract_mat(root, sys.argv[2])
if use_internal:
pca, X_transformed = do_pca(X)
else:
Y, id2idx, idx2node = extract_mat_leaves(root, sys.argv[2])
pca, Y_transformed = do_pca(Y)
X_transformed = do_pca_new(pca, X)
# OJ
p1 = 0
k = 'OJ'
t = 0
d = {}
for idx, node in idx2node.iteritems():
d[idx] = Y_transformed[idx][p1]
if node.name == k:
t = Y_transformed[idx][p1]
sidx = sorted(idx2node.keys(), key=lambda x: abs(d[x] - t))
for idx in sidx:
sys.stdout.write("%s\t%f\n" % (idx2node[idx].name, d[idx] - t))
exit(0)
import matplotlib.pyplot as plt
p1, p2 = 0, 1 # first and second PCs (zero-based numbering)
plt.figure(figsize=(8, 6), dpi=120)
plt.xlabel("PC%d (%2.1f%%)" % (p1 + 1, pca.explained_variance_ratio_[p1] * 100))
plt.ylabel("PC%d (%2.1f%%)" % (p2 + 1, pca.explained_variance_ratio_[p2] * 100))
plot_rec(root, X_transformed, plt, p1, p2)
plt.legend()
# plt.title('PCA')
# plt.xlim([-2.5, 1.5])
# plt.ylim([-1.5, 2.5])
if len(sys.argv) > 3:
plt.savefig(sys.argv[3], format="png", transparent=False, dpi=160)
plt.show()
if __name__ == "__main__":
main()
| bsd-2-clause |
siddharthhparikh/INFM750-project | boosted_gmm.py | 1 | 2154 | import matplotlib.pyplot as plt
import csv
from GMM_classifier import gmm_classifier
from sklearn.ensemble import AdaBoostRegressor
import random
import numpy as np
data = {}
with open('datasets/data_boston.csv', 'r') as csvfile:
csvfile.readline()
file = csv.reader(csvfile, delimiter=',')
for row in file:
if data.has_key(row[5]):
data[row[5]].append([float(row[14]), float(row[15]), row[5]])
else:
data[row[5]] = [[float(row[14]), float(row[15]), row[5]]]
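# (Column meaning assumed from the indexing above: row[5] is the violation
#  category used as the label, and row[14]/row[15] are the two numeric
#  features kept for each record.)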
test_data_list = []
train_data_list = []
for key,value in data.iteritems():
if len(value) > 15000:
for val in value[:15000]:
train_data_list.append(val)
for val in value[15000:19000]:
test_data_list.append(val)
del data
"""
train_data = np.array([[train_data_list[0][0], train_data_list[0][1]]])
train_data_label = np.array([[train_data_list[0][2]]])
test_data = np.empty(4000)
test_data_label = np.empty(4000)
"""
"""
train_data = np.zeros((15000,2))
train_data_label = []
test_data = np.zeros((4000,2))
test_data_label = []
i=0
for item in train_data_list[0]:
train_data[i,0] = item[0]
train_data[i,1] = item[1]
train_data_label.append(item[2])
i=i+1
i=0
for item in test_data_list[0]:
test_data[i,0] = item[0]
test_data[i,1] = item[1]
test_data_label.append(item[2])
i=i+1
del test_data_list
del train_data_list
"""
d = {
'Improper storage trash: res' : 0,
'Overgrown Weeds On Property' : 1,
'Failure clear sidewalk - snow' : 2,
'Overfilling of barrel/dumpster' : 3
}
train_data = list()
train_data_label = list()
test_data = list()
test_data_label = list()
random.shuffle(train_data_list)
random.shuffle(test_data_list)
for item in train_data_list:
train_data.append([item[0], item[1]])
train_data_label.append(d[item[2]])
for item in test_data_list:
test_data.append([item[0], item[1]])
test_data_label.append(d[item[2]])
print len(train_data), len(train_data_label), len(train_data_list)
regr = gmm_classifier()
regr.fit(train_data,train_data_label)
y_predict = regr.predict(test_data)
print y_predict
correct = 0
for a,b in zip(y_predict, test_data_label):
if a == b:
correct = correct+1
print float(correct)/len(test_data_label)
| apache-2.0 |
yukke42/machine-learning | 2/p30.py | 1 | 2222 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
class Perceptron(object):
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, Y):
"""
paramater
# X.shape = [n_samples, n_features]
# Y.shape = [n_samples]
return
# object
"""
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
            for xi, target in zip(X, Y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
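
# The learning step in fit() is the classic perceptron update rule:
#     w_j <- w_j + eta * (y_i - y_hat_i) * x_ij      (and the same for the bias w_0)
# so the weights only move on misclassified samples; e.g. with eta=0.01, y_i=1
# and y_hat_i=-1 the update factor is 0.01 * (1 - (-1)) = 0.02.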
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
y = df.iloc[0:150, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:150, [0,2]].values
ppn = Perceptron(eta=0.01, n_iter=10)
ppn.fit(X, y)
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc='upper left')
plt.show()
| mit |
all-umass/metric-learn | test/test_utils.py | 1 | 43506 | import pytest
from collections import namedtuple
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from sklearn.model_selection import train_test_split
from sklearn.exceptions import DataConversionWarning
from sklearn.utils import check_random_state, shuffle
from sklearn.utils.testing import set_random_state
from sklearn.base import clone
from metric_learn._util import (check_input, make_context, preprocess_tuples,
make_name, preprocess_points,
check_collapsed_pairs, validate_vector)
from metric_learn import (ITML, LSML, MMC, RCA, SDML, Covariance, LFDA,
LMNN, MLKR, NCA, ITML_Supervised, LSML_Supervised,
MMC_Supervised, RCA_Supervised, SDML_Supervised,
Constraints)
from metric_learn.base_metric import (ArrayIndexer, MahalanobisMixin,
_PairsClassifierMixin,
_QuadrupletsClassifierMixin)
from metric_learn.exceptions import PreprocessorError
from sklearn.datasets import make_regression, make_blobs, load_iris
SEED = 42
RNG = check_random_state(SEED)
Dataset = namedtuple('Dataset', ('data target preprocessor to_transform'))
# Data and target are what we will fit on. Preprocessor is the additional
# data if we use a preprocessor (which should be the default ArrayIndexer),
# and to_transform is some additional data that we would want to transform
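# For example, build_classification(with_preprocessor=True) returns
# Dataset(data=<indices>, target=y[<indices>], preprocessor=X,
# to_transform=<indices>), while with_preprocessor=False returns the formed
# points directly and preprocessor=None.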
@pytest.fixture
def build_classification(with_preprocessor=False):
"""Basic array for testing when using a preprocessor"""
X, y = shuffle(*make_blobs(random_state=SEED),
random_state=SEED)
indices = shuffle(np.arange(X.shape[0]), random_state=SEED).astype(int)
if with_preprocessor:
return Dataset(indices, y[indices], X, indices)
else:
return Dataset(X[indices], y[indices], None, X[indices])
@pytest.fixture
def build_regression(with_preprocessor=False):
"""Basic array for testing when using a preprocessor"""
X, y = shuffle(*make_regression(n_samples=100, n_features=5,
random_state=SEED),
random_state=SEED)
indices = shuffle(np.arange(X.shape[0]), random_state=SEED).astype(int)
if with_preprocessor:
return Dataset(indices, y[indices], X, indices)
else:
return Dataset(X[indices], y[indices], None, X[indices])
def build_data():
input_data, labels = load_iris(return_X_y=True)
X, y = shuffle(input_data, labels, random_state=SEED)
num_constraints = 50
constraints = Constraints(y)
pairs = (
constraints
.positive_negative_pairs(num_constraints, same_length=True,
random_state=check_random_state(SEED)))
return X, pairs
def build_pairs(with_preprocessor=False):
# builds a toy pairs problem
X, indices = build_data()
c = np.vstack([np.column_stack(indices[:2]), np.column_stack(indices[2:])])
target = np.concatenate([np.ones(indices[0].shape[0]),
- np.ones(indices[0].shape[0])])
c, target = shuffle(c, target, random_state=SEED)
if with_preprocessor:
# if preprocessor, we build a 2D array of pairs of indices
return Dataset(c, target, X, c[:, 0])
else:
# if not, we build a 3D array of pairs of samples
return Dataset(X[c], target, None, X[c[:, 0]])
def build_quadruplets(with_preprocessor=False):
# builds a toy quadruplets problem
X, indices = build_data()
c = np.column_stack(indices)
target = np.ones(c.shape[0]) # quadruplets targets are not used
# anyways
c, target = shuffle(c, target, random_state=SEED)
if with_preprocessor:
# if preprocessor, we build a 2D array of quadruplets of indices
return Dataset(c, target, X, c[:, 0])
else:
# if not, we build a 3D array of quadruplets of samples
return Dataset(X[c], target, None, X[c[:, 0]])
quadruplets_learners = [(LSML(), build_quadruplets)]
ids_quadruplets_learners = list(map(lambda x: x.__class__.__name__,
[learner for (learner, _) in
quadruplets_learners]))
pairs_learners = [(ITML(), build_pairs),
(MMC(max_iter=2), build_pairs), # max_iter=2 for faster
(SDML(), build_pairs),
]
ids_pairs_learners = list(map(lambda x: x.__class__.__name__,
[learner for (learner, _) in
pairs_learners]))
classifiers = [(Covariance(), build_classification),
(LFDA(), build_classification),
(LMNN(), build_classification),
(NCA(), build_classification),
(RCA(), build_classification),
(ITML_Supervised(max_iter=5), build_classification),
(LSML_Supervised(), build_classification),
(MMC_Supervised(max_iter=5), build_classification),
(RCA_Supervised(num_chunks=10), build_classification),
(SDML_Supervised(), build_classification)
]
ids_classifiers = list(map(lambda x: x.__class__.__name__,
[learner for (learner, _) in
classifiers]))
regressors = [(MLKR(), build_regression)]
ids_regressors = list(map(lambda x: x.__class__.__name__,
[learner for (learner, _) in regressors]))
WeaklySupervisedClasses = (_PairsClassifierMixin,
_QuadrupletsClassifierMixin)
tuples_learners = pairs_learners + quadruplets_learners
ids_tuples_learners = ids_pairs_learners + ids_quadruplets_learners
supervised_learners = classifiers + regressors
ids_supervised_learners = ids_classifiers + ids_regressors
metric_learners = tuples_learners + supervised_learners
ids_metric_learners = ids_tuples_learners + ids_supervised_learners
def mock_preprocessor(indices):
"""A preprocessor for testing purposes that returns an all ones 3D array
"""
return np.ones((indices.shape[0], 3))
@pytest.mark.parametrize('type_of_inputs', ['other', 'tuple', 'classics', 2,
int, NCA()])
def test_check_input_invalid_type_of_inputs(type_of_inputs):
"""Tests that an invalid type of inputs in check_inputs raises an error."""
with pytest.raises(ValueError) as e:
check_input([[0.2, 2.1], [0.2, .8]], type_of_inputs=type_of_inputs)
msg = ("Unknown value {} for type_of_inputs. Valid values are "
"'classic' or 'tuples'.".format(type_of_inputs))
assert str(e.value) == msg
# ---------------- test check_input with 'tuples' type_of_input' ------------
@pytest.fixture
def tuples_prep():
"""Basic array for testing when using a preprocessor"""
tuples = np.array([[1, 2],
[2, 3]])
return tuples
@pytest.fixture
def tuples_no_prep():
"""Basic array for testing when using no preprocessor"""
tuples = np.array([[[1., 2.3], [2.3, 5.3]],
[[2.3, 4.3], [0.2, 0.4]]])
return tuples
@pytest.mark.parametrize('estimator, expected',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
def test_make_context(estimator, expected):
"""test the make_name function"""
assert make_context(estimator) == expected
@pytest.mark.parametrize('estimator, expected',
[(NCA(), "NCA"), ('NCA', "NCA"), (None, None)])
def test_make_name(estimator, expected):
"""test the make_name function"""
assert make_name(estimator) == expected
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('load_tuples, preprocessor',
[(tuples_prep, mock_preprocessor),
(tuples_no_prep, None),
(tuples_no_prep, mock_preprocessor)])
def test_check_tuples_invalid_tuple_size(estimator, context, load_tuples,
preprocessor):
"""Checks that the exception are raised if tuple_size is not the one
expected"""
tuples = load_tuples()
preprocessed_tuples = (preprocess_tuples(tuples, preprocessor)
if (preprocessor is not None and
tuples.ndim == 2) else tuples)
expected_msg = ("Tuples of 3 element(s) expected{}. Got tuples of 2 "
"element(s) instead (shape={}):\ninput={}.\n"
.format(context, preprocessed_tuples.shape,
preprocessed_tuples))
with pytest.raises(ValueError) as raised_error:
check_input(tuples, type_of_inputs='tuples', tuple_size=3,
preprocessor=preprocessor, estimator=estimator)
assert str(raised_error.value) == expected_msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('tuples, found, expected, preprocessor',
[(5, '0', '2D array of indicators or 3D array of '
'formed tuples', mock_preprocessor),
(5, '0', '3D array of formed tuples', None),
([1, 2], '1', '2D array of indicators or 3D array '
'of formed tuples', mock_preprocessor),
([1, 2], '1', '3D array of formed tuples', None),
([[[[5]]]], '4', '2D array of indicators or 3D array'
' of formed tuples',
mock_preprocessor),
([[[[5]]]], '4', '3D array of formed tuples', None),
([[1], [3]], '2', '3D array of formed '
'tuples', None)])
def test_check_tuples_invalid_shape(estimator, context, tuples, found,
expected, preprocessor):
"""Checks that a value error with the appropriate message is raised if
shape is invalid (not 2D with preprocessor or 3D with no preprocessor)
"""
tuples = np.array(tuples)
msg = ("{} expected{}{}. Found {}D array instead:\ninput={}. Reshape your "
"data{}.\n"
.format(expected, context, ' when using a preprocessor'
if preprocessor else '', found, tuples,
' and/or use a preprocessor' if
(not preprocessor and tuples.ndim == 2) else ''))
with pytest.raises(ValueError) as raised_error:
check_input(tuples, type_of_inputs='tuples',
preprocessor=preprocessor, ensure_min_samples=0,
estimator=estimator)
assert str(raised_error.value) == msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
def test_check_tuples_invalid_n_features(estimator, context, tuples_no_prep):
"""Checks that the right warning is printed if not enough features
Here we only test if no preprocessor (otherwise we don't ensure this)
"""
msg = ("Found array with 2 feature(s) (shape={}) while"
" a minimum of 3 is required{}.".format(tuples_no_prep.shape,
context))
with pytest.raises(ValueError) as raised_error:
check_input(tuples_no_prep, type_of_inputs='tuples',
preprocessor=None, ensure_min_features=3,
estimator=estimator)
assert str(raised_error.value) == msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('load_tuples, preprocessor',
[(tuples_prep, mock_preprocessor),
(tuples_no_prep, None),
(tuples_no_prep, mock_preprocessor)])
def test_check_tuples_invalid_n_samples(estimator, context, load_tuples,
preprocessor):
"""Checks that the right warning is printed if n_samples is too small"""
tuples = load_tuples()
msg = ("Found array with 2 sample(s) (shape={}) while a minimum of 3 "
"is required{}.".format((preprocess_tuples(tuples, preprocessor)
if (preprocessor is not None and
tuples.ndim == 2) else tuples).shape,
context))
with pytest.raises(ValueError) as raised_error:
check_input(tuples, type_of_inputs='tuples',
preprocessor=preprocessor,
ensure_min_samples=3, estimator=estimator)
assert str(raised_error.value) == msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('load_tuples, preprocessor',
[(tuples_prep, mock_preprocessor),
(tuples_no_prep, None),
(tuples_no_prep, mock_preprocessor)])
def test_check_tuples_invalid_dtype_convertible(estimator, context,
load_tuples, preprocessor):
"""Checks that a warning is raised if a convertible input is converted to
float"""
tuples = load_tuples().astype(object) # here the object conversion is
# useless for the tuples_prep case, but this allows to test the
# tuples_prep case
if preprocessor is not None: # if the preprocessor is not None we
# overwrite it to have a preprocessor that returns objects
def preprocessor(indices): #
# preprocessor that returns objects
return np.ones((indices.shape[0], 3)).astype(object)
msg = ("Data with input dtype object was converted to float64{}."
.format(context))
with pytest.warns(DataConversionWarning) as raised_warning:
check_input(tuples, type_of_inputs='tuples',
preprocessor=preprocessor, dtype=np.float64,
warn_on_dtype=True, estimator=estimator)
assert str(raised_warning[0].message) == msg
def test_check_tuples_invalid_dtype_not_convertible_with_preprocessor(
tuples_prep):
"""Checks that a value error is thrown if attempting to convert an
input not convertible to float, when using a preprocessor
"""
def preprocessor(indices):
# preprocessor that returns objects
return np.full((indices.shape[0], 3), 'a')
with pytest.raises(ValueError):
check_input(tuples_prep, type_of_inputs='tuples',
preprocessor=preprocessor, dtype=np.float64)
def test_check_tuples_invalid_dtype_not_convertible_without_preprocessor(
tuples_no_prep):
"""Checks that a value error is thrown if attempting to convert an
input not convertible to float, when using no preprocessor
"""
tuples = np.full_like(tuples_no_prep, 'a', dtype=object)
with pytest.raises(ValueError):
check_input(tuples, type_of_inputs='tuples',
preprocessor=None, dtype=np.float64)
@pytest.mark.parametrize('tuple_size', [2, None])
def test_check_tuples_valid_tuple_size(tuple_size, tuples_prep, tuples_no_prep):
"""For inputs that have the right matrix dimension (2D or 3D for instance),
checks that checking the number of tuples (pairs, quadruplets, etc) raises
no warning if there is the right number of points in a tuple.
"""
with pytest.warns(None) as record:
check_input(tuples_prep, type_of_inputs='tuples',
preprocessor=mock_preprocessor, tuple_size=tuple_size)
check_input(tuples_no_prep, type_of_inputs='tuples', preprocessor=None,
tuple_size=tuple_size)
assert len(record) == 0
@pytest.mark.parametrize('tuples',
[np.array([[2.5, 0.1, 2.6],
[1.6, 4.8, 9.1]]),
np.array([[2, 0, 2],
[1, 4, 9]]),
np.array([["img1.png", "img3.png"],
["img2.png", "img4.png"]]),
[[2, 0, 2],
[1, 4, 9]],
[np.array([2, 0, 2]),
np.array([1, 4, 9])],
((2, 0, 2),
(1, 4, 9)),
np.array([[[1.2, 2.2], [1.4, 3.3]],
[[2.6, 2.3], [3.4, 5.0]]])])
def test_check_tuples_valid_with_preprocessor(tuples):
"""Test that valid inputs when using a preprocessor raises no warning"""
with pytest.warns(None) as record:
check_input(tuples, type_of_inputs='tuples',
preprocessor=mock_preprocessor)
assert len(record) == 0
@pytest.mark.parametrize('tuples',
[np.array([[[2.5], [0.1], [2.6]],
[[1.6], [4.8], [9.1]],
[[5.6], [2.8], [6.1]]]),
np.array([[[2], [0], [2]],
[[1], [4], [9]],
[[1], [5], [3]]]),
[[[2], [0], [2]],
[[1], [4], [9]],
[[3], [4], [29]]],
(((2, 1), (0, 2), (2, 3)),
((1, 2), (4, 4), (9, 3)),
((3, 1), (4, 4), (29, 4)))])
def test_check_tuples_valid_without_preprocessor(tuples):
"""Test that valid inputs when using no preprocessor raises no warning"""
with pytest.warns(None) as record:
check_input(tuples, type_of_inputs='tuples', preprocessor=None)
assert len(record) == 0
def test_check_tuples_behaviour_auto_dtype(tuples_no_prep):
"""Checks that check_tuples allows by default every type if using a
preprocessor, and numeric types if using no preprocessor"""
tuples_prep = [['img1.png', 'img2.png'], ['img3.png', 'img5.png']]
with pytest.warns(None) as record:
check_input(tuples_prep, type_of_inputs='tuples',
preprocessor=mock_preprocessor)
assert len(record) == 0
with pytest.warns(None) as record:
check_input(tuples_no_prep, type_of_inputs='tuples') # numeric type
assert len(record) == 0
# not numeric type
tuples_no_prep = np.array([[['img1.png'], ['img2.png']],
[['img3.png'], ['img5.png']]])
tuples_no_prep = tuples_no_prep.astype(object)
with pytest.raises(ValueError):
check_input(tuples_no_prep, type_of_inputs='tuples')
def test_check_tuples_invalid_complex_data():
"""Checks that the right error message is thrown if given complex data (
this comes from sklearn's check_array's message)"""
tuples = np.array([[[1 + 2j, 3 + 4j], [5 + 7j, 5 + 7j]],
[[1 + 3j, 2 + 4j], [5 + 8j, 1 + 7j]]])
msg = ("Complex data not supported\n"
"{}\n".format(tuples))
with pytest.raises(ValueError) as raised_error:
check_input(tuples, type_of_inputs='tuples')
assert str(raised_error.value) == msg
# ------------- test check_input with 'classic' type_of_inputs ----------------
@pytest.fixture
def points_prep():
"""Basic array for testing when using a preprocessor"""
points = np.array([1, 2])
return points
@pytest.fixture
def points_no_prep():
"""Basic array for testing when using no preprocessor"""
points = np.array([[1., 2.3],
[2.3, 4.3]])
return points
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('points, found, expected, preprocessor',
[(5, '0', '1D array of indicators or 2D array of '
'formed points', mock_preprocessor),
(5, '0', '2D array of formed points', None),
([1, 2], '1', '2D array of formed points', None),
([[[5]]], '3', '1D array of indicators or 2D '
'array of formed points',
mock_preprocessor),
([[[5]]], '3', '2D array of formed points', None)])
def test_check_classic_invalid_shape(estimator, context, points, found,
expected, preprocessor):
"""Checks that a value error with the appropriate message is raised if
shape is invalid (valid being 1D or 2D with preprocessor or 2D with no
preprocessor)
"""
points = np.array(points)
msg = ("{} expected{}{}. Found {}D array instead:\ninput={}. Reshape your "
"data{}.\n"
.format(expected, context, ' when using a preprocessor'
if preprocessor else '', found, points,
' and/or use a preprocessor' if
(not preprocessor and points.ndim == 1) else ''))
with pytest.raises(ValueError) as raised_error:
check_input(points, type_of_inputs='classic', preprocessor=preprocessor,
ensure_min_samples=0,
estimator=estimator)
assert str(raised_error.value) == msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
def test_check_classic_invalid_n_features(estimator, context,
points_no_prep):
"""Checks that the right warning is printed if not enough features
Here we only test if no preprocessor (otherwise we don't ensure this)
"""
msg = ("Found array with 2 feature(s) (shape={}) while"
" a minimum of 3 is required{}.".format(points_no_prep.shape,
context))
with pytest.raises(ValueError) as raised_error:
check_input(points_no_prep, type_of_inputs='classic', preprocessor=None,
ensure_min_features=3,
estimator=estimator)
assert str(raised_error.value) == msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('load_points, preprocessor',
[(points_prep, mock_preprocessor),
(points_no_prep, None),
(points_no_prep, mock_preprocessor)])
def test_check_classic_invalid_n_samples(estimator, context, load_points,
preprocessor):
"""Checks that the right warning is printed if n_samples is too small"""
points = load_points()
msg = ("Found array with 2 sample(s) (shape={}) while a minimum of 3 "
"is required{}.".format((preprocess_points(points,
preprocessor)
if preprocessor is not None and
points.ndim == 1 else
points).shape,
context))
with pytest.raises(ValueError) as raised_error:
check_input(points, type_of_inputs='classic', preprocessor=preprocessor,
ensure_min_samples=3,
estimator=estimator)
assert str(raised_error.value) == msg
@pytest.mark.parametrize('estimator, context',
[(NCA(), " by NCA"), ('NCA', " by NCA"), (None, "")])
@pytest.mark.parametrize('load_points, preprocessor',
[(points_prep, mock_preprocessor),
(points_no_prep, None),
(points_no_prep, mock_preprocessor)])
def test_check_classic_invalid_dtype_convertible(estimator, context,
load_points,
preprocessor):
"""Checks that a warning is raised if a convertible input is converted to
float"""
points = load_points().astype(object) # here the object conversion is
# useless for the points_prep case, but this allows to test the
# points_prep case
if preprocessor is not None: # if the preprocessor is not None we
# overwrite it to have a preprocessor that returns objects
def preprocessor(indices):
# preprocessor that returns objects
return np.ones((indices.shape[0], 3)).astype(object)
msg = ("Data with input dtype object was converted to float64{}."
.format(context))
with pytest.warns(DataConversionWarning) as raised_warning:
check_input(points, type_of_inputs='classic',
preprocessor=preprocessor, dtype=np.float64,
warn_on_dtype=True, estimator=estimator)
assert str(raised_warning[0].message) == msg
@pytest.mark.parametrize('preprocessor, points',
[(mock_preprocessor, np.array([['a', 'b'],
['e', 'b']])),
(None, np.array([[['b', 'v'], ['a', 'd']],
[['x', 'u'], ['c', 'a']]]))])
def test_check_classic_invalid_dtype_not_convertible(preprocessor, points):
"""Checks that a value error is thrown if attempting to convert an
input not convertible to float
"""
with pytest.raises(ValueError):
check_input(points, type_of_inputs='classic',
preprocessor=preprocessor, dtype=np.float64)
@pytest.mark.parametrize('points',
[["img1.png", "img3.png", "img2.png"],
np.array(["img1.png", "img3.png", "img2.png"]),
[2, 0, 2, 1, 4, 9],
range(10),
np.array([2, 0, 2]),
(2, 0, 2),
np.array([[1.2, 2.2],
[2.6, 2.3]])])
def test_check_classic_valid_with_preprocessor(points):
"""Test that valid inputs when using a preprocessor raises no warning"""
with pytest.warns(None) as record:
check_input(points, type_of_inputs='classic',
preprocessor=mock_preprocessor)
assert len(record) == 0
@pytest.mark.parametrize('points',
[np.array([[2.5, 0.1, 2.6],
[1.6, 4.8, 9.1],
[5.6, 2.8, 6.1]]),
np.array([[2, 0, 2],
[1, 4, 9],
[1, 5, 3]]),
[[2, 0, 2],
[1, 4, 9],
[3, 4, 29]],
((2, 1, 0, 2, 2, 3),
(1, 2, 4, 4, 9, 3),
(3, 1, 4, 4, 29, 4))])
def test_check_classic_valid_without_preprocessor(points):
"""Test that valid inputs when using no preprocessor raises no warning"""
with pytest.warns(None) as record:
check_input(points, type_of_inputs='classic', preprocessor=None)
assert len(record) == 0
def test_check_classic_by_default():
"""Checks that 'classic' is the default behaviour of check_input"""
assert (check_input([[2, 3], [3, 2]]) ==
check_input([[2, 3], [3, 2]], type_of_inputs='classic')).all()
def test_check_classic_behaviour_auto_dtype(points_no_prep):
"""Checks that check_input (for points) allows by default every type if
using a preprocessor, and numeric types if using no preprocessor"""
points_prep = ['img1.png', 'img2.png', 'img3.png', 'img5.png']
with pytest.warns(None) as record:
check_input(points_prep, type_of_inputs='classic',
preprocessor=mock_preprocessor)
assert len(record) == 0
with pytest.warns(None) as record:
check_input(points_no_prep, type_of_inputs='classic') # numeric type
assert len(record) == 0
# not numeric type
points_no_prep = np.array(['img1.png', 'img2.png', 'img3.png',
'img5.png'])
points_no_prep = points_no_prep.astype(object)
with pytest.raises(ValueError):
check_input(points_no_prep, type_of_inputs='classic')
def test_check_classic_invalid_complex_data():
"""Checks that the right error message is thrown if given complex data (
this comes from sklearn's check_array's message)"""
points = np.array([[[1 + 2j, 3 + 4j], [5 + 7j, 5 + 7j]],
[[1 + 3j, 2 + 4j], [5 + 8j, 1 + 7j]]])
msg = ("Complex data not supported\n"
"{}\n".format(points))
with pytest.raises(ValueError) as raised_error:
check_input(points, type_of_inputs='classic')
assert str(raised_error.value) == msg
# ----------------------------- Test preprocessor -----------------------------
X = np.array([[0.89, 0.11, 1.48, 0.12],
[2.63, 1.08, 1.68, 0.46],
[1.00, 0.59, 0.62, 1.15]])
class MockFileLoader:
"""Preprocessor that takes a root file path at construction and simulates
fetching the file in the specific root folder when given the name of the
file"""
def __init__(self, root):
self.root = root
self.folders = {'fake_root': {'img0.png': X[0],
'img1.png': X[1],
'img2.png': X[2]
},
'other_folder': {} # empty folder
}
def __call__(self, path_list):
images = list()
for path in path_list:
images.append(self.folders[self.root][path])
return np.array(images)
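
# Usage sketch (illustrative): a loader rooted at 'fake_root' maps file names
# back to rows of X, e.g. MockFileLoader('fake_root')(['img0.png', 'img2.png'])
# returns np.array([X[0], X[2]]); the 'other_folder' root is defined but empty.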
def mock_id_loader(list_of_indicators):
"""A preprocessor as a function that takes indicators (strings) and
returns the corresponding samples"""
points = []
for indicator in list_of_indicators:
points.append(X[int(indicator[2:])])
return np.array(points)
tuples_list = [np.array([[0, 1],
[2, 1]]),
np.array([['img0.png', 'img1.png'],
['img2.png', 'img1.png']]),
np.array([['id0', 'id1'],
['id2', 'id1']])
]
points_list = [np.array([0, 1, 2, 1]),
np.array(['img0.png', 'img1.png', 'img2.png', 'img1.png']),
np.array(['id0', 'id1', 'id2', 'id1'])
]
preprocessors = [X, MockFileLoader('fake_root'), mock_id_loader]
@pytest.fixture
def y_tuples():
y = [-1, 1]
return y
@pytest.fixture
def y_points():
y = [0, 1, 0, 0]
return y
@pytest.mark.parametrize('preprocessor, tuples', zip(preprocessors,
tuples_list))
def test_preprocessor_weakly_supervised(preprocessor, tuples, y_tuples):
"""Tests different ways to use the preprocessor argument: an array,
a class callable, and a function callable, with a weakly supervised
algorithm
"""
  itml = ITML(preprocessor=preprocessor)
  itml.fit(tuples, y_tuples)
@pytest.mark.parametrize('preprocessor, points', zip(preprocessors,
points_list))
def test_preprocessor_supervised(preprocessor, points, y_points):
"""Tests different ways to use the preprocessor argument: an array,
a class callable, and a function callable, with a supervised algorithm
"""
lfda = LFDA(preprocessor=preprocessor)
lfda.fit(points, y_points)
@pytest.mark.parametrize('estimator', ['NCA', NCA(), None])
def test_preprocess_tuples_invalid_message(estimator):
"""Checks that if the preprocessor does some weird stuff, the preprocessed
input is detected as weird. Checks this for preprocess_tuples."""
context = make_context(estimator) + (' after the preprocessor '
'has been applied')
def preprocessor(sequence):
return np.ones((len(sequence), 2, 2)) # returns a 3D array instead of 2D
with pytest.raises(ValueError) as raised_error:
check_input(np.ones((3, 2)), type_of_inputs='tuples',
preprocessor=preprocessor, estimator=estimator)
expected_msg = ("3D array of formed tuples expected{}. Found 4D "
"array instead:\ninput={}. Reshape your data{}.\n"
.format(context, np.ones((3, 2, 2, 2)),
' and/or use a preprocessor' if preprocessor
is not None else ''))
assert str(raised_error.value) == expected_msg
@pytest.mark.parametrize('estimator', ['NCA', NCA(), None])
def test_preprocess_points_invalid_message(estimator):
"""Checks that if the preprocessor does some weird stuff, the preprocessed
input is detected as weird."""
context = make_context(estimator) + (' after the preprocessor '
'has been applied')
def preprocessor(sequence):
return np.ones((len(sequence), 2, 2)) # returns a 3D array instead of 2D
with pytest.raises(ValueError) as raised_error:
check_input(np.ones((3,)), type_of_inputs='classic',
preprocessor=preprocessor, estimator=estimator)
expected_msg = ("2D array of formed points expected{}. "
"Found 3D array instead:\ninput={}. Reshape your data{}.\n"
.format(context, np.ones((3, 2, 2)),
' and/or use a preprocessor' if preprocessor
is not None else ''))
assert str(raised_error.value) == expected_msg
def test_preprocessor_error_message():
"""Tests whether the preprocessor returns a preprocessor error when there
is a problem using the preprocessor
"""
preprocessor = ArrayIndexer(np.array([[1.2, 3.3], [3.1, 3.2]]))
# with tuples
X = np.array([[[2, 3], [3, 3]], [[2, 3], [3, 2]]])
# There are less samples than the max index we want to preprocess
with pytest.raises(PreprocessorError):
preprocess_tuples(X, preprocessor)
# with points
X = np.array([[1], [2], [3], [3]])
with pytest.raises(PreprocessorError):
preprocess_points(X, preprocessor)
@pytest.mark.parametrize('input_data', [[[5, 3], [3, 2]],
((5, 3), (3, 2))
])
@pytest.mark.parametrize('indices', [[0, 1], (1, 0)])
def test_array_like_indexer_array_like_valid_classic(input_data, indices):
"""Checks that any array-like is valid in the 'preprocessor' argument,
and in the indices, for a classic input"""
class MockMetricLearner(MahalanobisMixin):
pass
mock_algo = MockMetricLearner(preprocessor=input_data)
mock_algo._prepare_inputs(indices, type_of_inputs='classic')
@pytest.mark.parametrize('input_data', [[[5, 3], [3, 2]],
((5, 3), (3, 2))
])
@pytest.mark.parametrize('indices', [[[0, 1], [1, 0]], ((1, 0), (1, 0))])
def test_array_like_indexer_array_like_valid_tuples(input_data, indices):
"""Checks that any array-like is valid in the 'preprocessor' argument,
  and in the indices, for a tuples input"""
class MockMetricLearner(MahalanobisMixin):
pass
mock_algo = MockMetricLearner(preprocessor=input_data)
mock_algo._prepare_inputs(indices, type_of_inputs='tuples')
@pytest.mark.parametrize('preprocessor', [4, NCA()])
def test_error_message_check_preprocessor(preprocessor):
"""Checks that if the preprocessor given is not an array-like or a
callable, the right error message is returned"""
class MockMetricLearner(MahalanobisMixin):
pass
mock_algo = MockMetricLearner(preprocessor=preprocessor)
with pytest.raises(ValueError) as e:
mock_algo.check_preprocessor()
assert str(e.value) == ("Invalid type for the preprocessor: {}. You should "
"provide either None, an array-like object, "
"or a callable.".format(type(preprocessor)))
@pytest.mark.parametrize('estimator', [ITML(), LSML(), MMC(), SDML()],
ids=['ITML', 'LSML', 'MMC', 'SDML'])
def test_error_message_tuple_size(estimator):
"""Tests that if a tuples learner is not given the good number of points
per tuple, it throws an error message"""
estimator = clone(estimator)
set_random_state(estimator)
invalid_pairs = np.array([[[1.3, 6.3], [3., 6.8], [6.5, 4.4]],
[[1.9, 5.3], [1., 7.8], [3.2, 1.2]]])
y = [1, 1]
with pytest.raises(ValueError) as raised_err:
estimator.fit(invalid_pairs, y)
expected_msg = ("Tuples of {} element(s) expected{}. Got tuples of 3 "
"element(s) instead (shape=(2, 3, 2)):\ninput={}.\n"
.format(estimator._tuple_size, make_context(estimator),
invalid_pairs))
assert str(raised_err.value) == expected_msg
@pytest.mark.parametrize('estimator, _', metric_learners,
ids=ids_metric_learners)
def test_error_message_t_score_pairs(estimator, _):
"""tests that if you want to score_pairs on triplets for instance, it returns
the right error message
"""
estimator = clone(estimator)
set_random_state(estimator)
estimator.check_preprocessor()
triplets = np.array([[[1.3, 6.3], [3., 6.8], [6.5, 4.4]],
[[1.9, 5.3], [1., 7.8], [3.2, 1.2]]])
with pytest.raises(ValueError) as raised_err:
estimator.score_pairs(triplets)
expected_msg = ("Tuples of 2 element(s) expected{}. Got tuples of 3 "
"element(s) instead (shape=(2, 3, 2)):\ninput={}.\n"
.format(make_context(estimator), triplets))
assert str(raised_err.value) == expected_msg
def test_preprocess_tuples_simple_example():
"""Test the preprocessor on a very simple example of tuples to ensure the
result is as expected"""
array = np.array([[1, 2],
[2, 3],
[4, 5]])
def fun(row):
return np.array([[1, 1], [3, 3], [4, 4]])
expected_result = np.array([[[1, 1], [1, 1]],
[[3, 3], [3, 3]],
[[4, 4], [4, 4]]])
assert (preprocess_tuples(array, fun) == expected_result).all()
def test_preprocess_points_simple_example():
"""Test the preprocessor on very simple examples of points to ensure the
result is as expected"""
array = np.array([1, 2, 4])
def fun(row):
return [[1, 1], [3, 3], [4, 4]]
expected_result = np.array([[1, 1],
[3, 3],
[4, 4]])
assert (preprocess_points(array, fun) == expected_result).all()
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_same_with_or_without_preprocessor(estimator, build_dataset):
"""Test that algorithms using a preprocessor behave consistently
  with their no-preprocessor equivalent
"""
dataset_indices = build_dataset(with_preprocessor=True)
dataset_formed = build_dataset(with_preprocessor=False)
X = dataset_indices.preprocessor
indicators_to_transform = dataset_indices.to_transform
formed_points_to_transform = dataset_formed.to_transform
(indices_train, indices_test, y_train, y_test, formed_train,
formed_test) = train_test_split(dataset_indices.data,
dataset_indices.target,
dataset_formed.data,
random_state=SEED)
def make_random_state(estimator):
rs = {}
if estimator.__class__.__name__[-11:] == '_Supervised':
rs['random_state'] = check_random_state(SEED)
return rs
estimator_with_preprocessor = clone(estimator)
set_random_state(estimator_with_preprocessor)
estimator_with_preprocessor.set_params(preprocessor=X)
estimator_with_preprocessor.fit(indices_train, y_train,
**make_random_state(estimator))
estimator_without_preprocessor = clone(estimator)
set_random_state(estimator_without_preprocessor)
estimator_without_preprocessor.set_params(preprocessor=None)
estimator_without_preprocessor.fit(formed_train, y_train,
**make_random_state(estimator))
estimator_with_prep_formed = clone(estimator)
set_random_state(estimator_with_prep_formed)
estimator_with_prep_formed.set_params(preprocessor=X)
estimator_with_prep_formed.fit(indices_train, y_train,
**make_random_state(estimator))
# test prediction methods
for method in ["predict", "decision_function"]:
if hasattr(estimator, method):
output_with_prep = getattr(estimator_with_preprocessor,
method)(indices_test)
output_without_prep = getattr(estimator_without_preprocessor,
method)(formed_test)
assert np.array(output_with_prep == output_without_prep).all()
output_with_prep = getattr(estimator_with_preprocessor,
method)(indices_test)
output_with_prep_formed = getattr(estimator_with_prep_formed,
method)(formed_test)
assert np.array(output_with_prep == output_with_prep_formed).all()
# test score_pairs
output_with_prep = estimator_with_preprocessor.score_pairs(
indicators_to_transform[[[[0, 2], [5, 3]]]])
output_without_prep = estimator_without_preprocessor.score_pairs(
formed_points_to_transform[[[[0, 2], [5, 3]]]])
assert np.array(output_with_prep == output_without_prep).all()
output_with_prep = estimator_with_preprocessor.score_pairs(
indicators_to_transform[[[[0, 2], [5, 3]]]])
output_without_prep = estimator_with_prep_formed.score_pairs(
formed_points_to_transform[[[[0, 2], [5, 3]]]])
assert np.array(output_with_prep == output_without_prep).all()
# test transform
output_with_prep = estimator_with_preprocessor.transform(
indicators_to_transform)
output_without_prep = estimator_without_preprocessor.transform(
formed_points_to_transform)
assert np.array(output_with_prep == output_without_prep).all()
output_with_prep = estimator_with_preprocessor.transform(
indicators_to_transform)
output_without_prep = estimator_with_prep_formed.transform(
formed_points_to_transform)
assert np.array(output_with_prep == output_without_prep).all()
def test_check_collapsed_pairs_raises_no_error():
"""Checks that check_collapsed_pairs raises no error if no collapsed pairs
is present"""
pairs_ok = np.array([[[0.1, 3.3], [3.3, 0.1]],
[[0.1, 3.3], [3.3, 0.1]],
[[2.5, 8.1], [0.1, 3.3]]])
check_collapsed_pairs(pairs_ok)
def test_check_collapsed_pairs_raises_error():
"""Checks that check_collapsed_pairs raises no error if no collapsed pairs
is present"""
pairs_not_ok = np.array([[[0.1, 3.3], [0.1, 3.3]],
[[0.1, 3.3], [3.3, 0.1]],
[[2.5, 8.1], [2.5, 8.1]]])
with pytest.raises(ValueError) as e:
check_collapsed_pairs(pairs_not_ok)
assert str(e.value) == ("2 collapsed pairs found (where the left element is "
"the same as the right element), out of 3 pairs in"
" total.")
def test__validate_vector():
"""Replica of scipy.spatial.tests.test_distance.test__validate_vector"""
x = [1, 2, 3]
y = validate_vector(x)
assert_array_equal(y, x)
y = validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
y = validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, [x])
x = np.arange(5).reshape(1, -1, 1)
y = validate_vector(x)
assert_equal(y.ndim, 1)
assert_array_equal(y, x[0, :, 0])
x = [[1, 2], [3, 4]]
with pytest.raises(ValueError):
validate_vector(x)
| mit |
terkkila/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
eamontoyaa/pyCSS | functions/plotslice.py | 1 | 4081 | # import modules
import matplotlib.pyplot as plt
'''
# Description.
Plot one slice from its structure.
# Input(s).
Slices dictionary type structures (slicesSTRCell). Has the following fields:
    plineCords: contains all the coordinates that define the
    slice's closed polyline;
area: slice polygon area;
midPoint: coordinates of the middle point of the slice at its base;
midHeight: value of the mean height taken the initial,
middle and end heights of the slice at its base to terrain surface;
width: value of width slice;
inclinationAngleGradAtBottom: angle in sexagesimal grades of the
secant line that passess trough the extreme borders of the bottom slice;
inclinationAngleGradAtTop: angle in sexagesimal grades of the
secant line that passess trough the extreme borders of the top slice;
wtMidHeight: value of the mean height taken the initial,
middle and end heights of the slice at its base to watertable surface;
wtMidHeightAboveSlope: value of the mean height taken the initial,
middle and end heights of the water column above slope surface;
hrzMomentArm: value of the horizontal component of the moment arm acting
on the slope due to the water above it;
vrtMomentArm: value of the vertical component of the moment arm acting
on the slope due to the water above it;
# Output(s).
Array containing the coordinates of the slice contourn
(slicePlineCordsArray).
# Example1.
slicesSTR = \
{'area': 18.063276613019383,
'hrzMomentArm': -5.4527963142320601,
'inclinationAngleGradAtBottom': 21.379968728885775,
'inclinationAngleGradAtTop': 68.198590513648185,
'midHeight': 8.6015602919139909,
'midPoint': np.array([ 11.45 , 3.07666551]),
'plineCords': np.array([[ 10.4 , 14.30322581],
[ 10.4 , 3.4877326 ],
[ 11.45 , 3.07666551],
[ 12.5 , 2.66559843],
[ 12.5 , 9.05322581],
[ 10.4 , 14.30322581]]),
'vrtMomentArm': 5.3266677434544878,
'width': 2.0999999999999996,
'wtMidHeight': 8.6015602919139909,
'wtMidHeightAboveSlope': 0.0}
---
slicePlineCordsArray = plotslice(slicesSTR)
'''
def plotslice(slicesSTR):
    # Successive plt.plot calls draw on the same axes by default
    # (plt.hold was deprecated and removed in matplotlib >= 3.0).
    # Plot the slice contour and its middle point
    plt.plot(slicesSTR['plineCords'][:, 0], slicesSTR['plineCords'][:, 1],
             'k-', lw=0.5)
    plt.plot(slicesSTR['midPoint'][0], slicesSTR['midPoint'][1], 'k.', lw=0.3)
    # Redirect the polyline coordinates to the output variable
    slicePlineCordsArray = slicesSTR['plineCords']
    return slicePlineCordsArray
'''
BSD 2 license.
Copyright (c) 2016, Universidad Nacional de Colombia, Ludger O.
Suarez-Burgoa and Exneyder Andrés Montoya Araque.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| bsd-2-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/scipy/stats/_discrete_distns.py | 16 | 21688 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from scipy._lib._numpy_compat import broadcast_to
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
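# Illustrative usage (not part of the original module; kept as a comment so
# that importing scipy.stats stays side-effect free). For 10 fair coin flips:
#     from scipy.stats import binom
#     binom.pmf(3, n=10, p=0.5)   # C(10, 3) * 0.5**10 = 0.1171875
#     binom.cdf(3, n=10, p=0.5)   # P(X <= 3) = 176/1024 = 0.171875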
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is the total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = np.maximum(N-(M-n), 0)
self.b = np.minimum(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
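# Illustrative usage (not part of the original module; kept as a comment so
# that importing scipy.stats stays side-effect free):
#     from scipy.stats import poisson
#     poisson.pmf(0, mu=2.0)               # exp(-2) ~= 0.1353
#     poisson.mean(2.0), poisson.var(2.0)  # both equal mu, i.e. (2.0, 2.0)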
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
self.a = np.where(lambda_ > 0, 0, -np.inf)
self.b = np.where(lambda_ > 0, np.inf, 0)
return lambda_ != 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high):
"""An array of *size* random integers >= ``low`` and < ``high``."""
if self._size is not None:
# Numpy's RandomState.randint() doesn't broadcast its arguments.
# Use `broadcast_to()` to extend the shapes of low and high
# up to self._size. Then we can use the numpy.vectorize'd
# randint without needing to pass it a `size` argument.
low = broadcast_to(low, self._size)
high = broadcast_to(high, self._size)
randint = np.vectorize(self._random_state.randint, otypes=[np.int_])
return randint(low, high)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
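# Illustrative usage (not part of the original module): with independent
# Poisson counts (rho = 0) the shape parameters reduce to mu1 = lam1 and
# mu2 = lam2, and k1 - k2 follows skellam(mu1, mu2), e.g.
#     from scipy.stats import skellam
#     skellam.mean(3.0, 1.0), skellam.var(3.0, 1.0)  # (mu1 - mu2, mu1 + mu2) = (2.0, 4.0)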
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| gpl-3.0 |
lthurlow/Boolean-Constrained-Routing | working_dir/runs/read_plot.py | 1 | 1247 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os, sys
import pdb
files_to_read = []
for l in os.listdir('.'):
if l.split('.')[-1] == 'txt':
files_to_read.append(l)
d_l = []
d_e = []
for k in files_to_read:
f = open(k, 'r')
counter = 0
temp_d = {}
temp_e = {}
for line in f:
if '[' in line:
counter = 0
continue
lp = line.split(',')
if counter not in temp_d:
temp_d[counter] = [float(lp[0].strip())]
temp_e[counter] = [float(lp[1].strip())]
else:
temp_d[counter].append(float(lp[0].strip()))
temp_e[counter].append(float(lp[1].strip()))
counter += 1
f.close()
d_l.append((temp_d[1],k))
  d_e.append((temp_e[1],k))
counter = 0
for zzz in d_l:
# example data
#x = np.arange(0.1, 4, 0.5)
x = [10,20,30,40,50]
y1 = zzz[0]
# First illustrate basic pyplot interface, using defaults where possible.
plt.plot(x, y1, 'rs-.',c=plt.cm.RdYlBu(counter),label=str(zzz[1]))
counter += 25
plt.title('Time Comparison for Shortest Path')
plt.ylabel('Time (s)')
plt.xlabel('Nodes')
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid('on')
plt.savefig('all.png',bbox_extra_artists=(lgd,), bbox_inches='tight')
| mit |
jgowans/correlation_plotter | plot_one_cross_from_file.py | 1 | 1196 | #!/usr/bin/env python
import argparse
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--f_start', default=200, type=float)
parser.add_argument('--f_stop', default=300, type=float)
parser.add_argument('--d', type=str)
parser.add_argument('--annotate', type=float, default=None)
args = parser.parse_args()
sig = np.load('{d}/0x1.npy'.format(d = args.d))
sig = np.log10(np.abs(sig))
plt.plot(np.linspace(0, 400, len(sig), endpoint=False), sig)
plt.title("Magnitude spectrum of 0x1 at XXXX")
plt.xlabel("Frequency (MHz)")
plt.ylabel("Magnitude (arbitrary units) [log]")
plt.xlim(left = args.f_start, right = args.f_stop)
if args.annotate is not None:
xy = (args.annotate, sig[int(round(1024*(args.annotate/400.0)))])
xytext = (xy[0] - 22, xy[1] + 0.5)
plt.annotate('{f}'.format(f = args.annotate),
xy = xy,
xytext = xytext,
color='red',
arrowprops=dict(facecolor='red', shrink=0.05, width=2, headwidth=6)
)
plt.show()
| mit |
aabadie/scikit-learn | sklearn/calibration.py | 18 | 19402 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y``
is neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
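# Illustrative usage (not part of the original module; kept as a comment so
# that importing sklearn.calibration stays side-effect free):
#     from sklearn.datasets import make_classification
#     from sklearn.svm import LinearSVC
#     from sklearn.calibration import CalibratedClassifierCV
#     X, y = make_classification(n_samples=200, random_state=0)
#     clf = CalibratedClassifierCV(LinearSVC(random_state=0),
#                                  method='sigmoid', cv=3)
#     probas = clf.fit(X, y).predict_proba(X)   # shape (200, 2), rows sum to 1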
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
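# Illustrative note (not part of the original module): the returned pair
# (a, b) parameterizes the sigmoid map applied by _SigmoidCalibration.predict
# below, i.e. a raw score f is calibrated as
#     p = 1. / (1. + np.exp(a * f + b))
# with a typically negative so that p increases with the decision score.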
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
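# Illustrative usage (not part of the original module):
#     import numpy as np
#     from sklearn.calibration import calibration_curve
#     y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
#     y_prob = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
#     prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=3)
# With three bins this yields prob_true = [0., 0.5, 1.] and
# prob_pred = [0.2, 0.525, 0.85]; a well-calibrated model keeps the two
# arrays close to each other.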
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/misc/pilutil.py | 9 | 20925 | """
A collection of image utilities using the Python Imaging Library (PIL).
Note that PIL is not a dependency of SciPy and this module is not
available on systems that don't have PIL installed.
"""
from __future__ import division, print_function, absolute_import
# Functions which need the PIL
import numpy
import tempfile
from numpy import (amin, amax, ravel, asarray, arange, ones, newaxis,
transpose, iscomplexobj, uint8, issubdtype, array)
try:
from PIL import Image, ImageFilter
except ImportError:
import Image
import ImageFilter
if not hasattr(Image, 'frombytes'):
Image.frombytes = Image.fromstring
__all__ = ['fromimage', 'toimage', 'imsave', 'imread', 'bytescale',
'imrotate', 'imresize', 'imshow', 'imfilter']
@numpy.deprecate(message="`bytescale` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.")
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(uint8)
@numpy.deprecate(message="`imread` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use ``imageio.imread`` instead.")
def imread(name, flatten=False, mode=None):
"""
Read an image from a file as an array.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
name : str or file object
The file name or file object to be read.
flatten : bool, optional
If True, flattens the color layers into a single gray-scale layer.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes for more
details.
Returns
-------
imread : ndarray
The array obtained by reading the image.
Notes
-----
`imread` uses the Python Imaging Library (PIL) to read an image.
The following notes are from the PIL documentation.
`mode` can be one of the following strings:
* 'L' (8-bit pixels, black and white)
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
* 'RGB' (3x8-bit pixels, true color)
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
* 'CMYK' (4x8-bit pixels, color separation)
* 'YCbCr' (3x8-bit pixels, color video format)
* 'I' (32-bit signed integer pixels)
* 'F' (32-bit floating point pixels)
PIL also provides limited support for a few special modes, including
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
(true color with premultiplied alpha).
When translating a color image to black and white (mode 'L', 'I' or
'F'), the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
When `flatten` is True, the image is converted using mode 'F'.
When `mode` is not None and `flatten` is True, the image is first
converted according to `mode`, and the result is then flattened using
mode 'F'.
"""
im = Image.open(name)
return fromimage(im, flatten=flatten, mode=mode)
@numpy.deprecate(message="`imsave` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use ``imageio.imwrite`` instead.")
def imsave(name, arr, format=None):
"""
Save an array as an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
name : str or file object
Output file name or file object.
arr : ndarray, MxN or MxNx3 or MxNx4
Array containing image values. If the shape is ``MxN``, the array
represents a grey-level image. Shape ``MxNx3`` stores the red, green
and blue bands along the last dimension. An alpha layer may be
included, specified as the last colour band of an ``MxNx4`` array.
format : str
Image format. If omitted, the format to use is determined from the
file name extension. If a file object was used instead of a file name,
this parameter should always be used.
Examples
--------
Construct an array of gradient intensity values and save to file:
>>> from scipy.misc import imsave
>>> x = np.zeros((255, 255))
>>> x = np.zeros((255, 255), dtype=np.uint8)
>>> x[:] = np.arange(255)
>>> imsave('gradient.png', x)
Construct an array with three colour bands (R, G, B) and store to file:
>>> rgb = np.zeros((255, 255, 3), dtype=np.uint8)
>>> rgb[..., 0] = np.arange(255)
>>> rgb[..., 1] = 55
>>> rgb[..., 2] = 1 - np.arange(255)
>>> imsave('rgb_gradient.png', rgb)
"""
im = toimage(arr, channel_axis=2)
if format is None:
im.save(name)
else:
im.save(name, format)
return
@numpy.deprecate(message="`fromimage` is deprecated in SciPy 1.0.0. "
"and will be removed in 1.2.0.\n"
"Use ``np.asarray(im)`` instead.")
def fromimage(im, flatten=False, mode=None):
"""
Return a copy of a PIL image as a numpy array.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
"""
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a
_errstr = "Mode is unknown or incompatible with input array shape."
@numpy.deprecate(message="`toimage` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use Pillow's ``Image.fromarray`` directly instead.")
def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
mode=None, channel_axis=None):
"""Takes a numpy array and returns a PIL image.
This function is only available if Python Imaging Library (PIL) is installed.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
data = asarray(arr)
if iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and
((3 in shape) or (4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(numpy.float32)
image = Image.frombytes(mode, shape, data32.tostring())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data, high=high, low=low,
cmin=cmin, cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tostring())
if pal is not None:
image.putpalette(asarray(pal, dtype=uint8).tostring())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *
ones((3,), dtype=uint8)[newaxis, :])
image.putpalette(asarray(pal, dtype=uint8).tostring())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tostring())
return image
if cmin is None:
cmin = amin(ravel(data))
if cmax is None:
cmax = amax(ravel(data))
data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
if mode == 'I':
data32 = data.astype(numpy.uint32)
image = Image.frombytes(mode, shape, data32.tostring())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
ca = numpy.flatnonzero(asarray(shape) == 3)[0]
else:
ca = numpy.flatnonzero(asarray(shape) == 4)
if len(ca):
ca = ca[0]
else:
raise ValueError("Could not find channel dimension.")
else:
ca = channel_axis
numch = shape[ca]
if numch not in [3, 4]:
raise ValueError("Channel axis dimension is not valid.")
bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
if ca == 2:
strdata = bytedata.tostring()
shape = (shape[1], shape[0])
elif ca == 1:
strdata = transpose(bytedata, (0, 2, 1)).tostring()
shape = (shape[2], shape[0])
elif ca == 0:
strdata = transpose(bytedata, (1, 2, 0)).tostring()
shape = (shape[2], shape[1])
if mode is None:
if numch == 3:
mode = 'RGB'
else:
mode = 'RGBA'
if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
raise ValueError(_errstr)
if mode in ['RGB', 'YCbCr']:
if numch != 3:
raise ValueError("Invalid array shape for mode.")
if mode in ['RGBA', 'CMYK']:
if numch != 4:
raise ValueError("Invalid array shape for mode.")
# Here we know data and mode is correct
image = Image.frombytes(mode, shape, strdata)
return image
@numpy.deprecate(message="`imrotate` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use ``skimage.transform.rotate`` instead.")
def imrotate(arr, angle, interp='bilinear'):
"""
Rotate an image counter-clockwise by angle degrees.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
arr : ndarray
Input array of image to be rotated.
angle : float
The angle of rotation.
interp : str, optional
Interpolation
- 'nearest' : for nearest neighbor
- 'bilinear' : for bilinear
- 'lanczos' : for lanczos
- 'cubic' : for bicubic
- 'bicubic' : for bicubic
Returns
-------
imrotate : ndarray
The rotated array of image.
"""
arr = asarray(arr)
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
im = toimage(arr)
im = im.rotate(angle, resample=func[interp])
return fromimage(im)
@numpy.deprecate(message="`imshow` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use ``matplotlib.pyplot.imshow`` instead.")
def imshow(arr):
"""
Simple showing of an image through an external viewer.
This function is only available if Python Imaging Library (PIL) is installed.
Uses the image viewer specified by the environment variable
SCIPY_PIL_IMAGE_VIEWER, or if that is not defined then `see`,
to view a temporary file generated from array data.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
arr : ndarray
Array of image data to show.
Returns
-------
None
Examples
--------
>>> a = np.tile(np.arange(255), (255,1))
>>> from scipy import misc
>>> misc.imshow(a)
"""
im = toimage(arr)
fnum, fname = tempfile.mkstemp('.png')
try:
im.save(fname)
except:
raise RuntimeError("Error saving temporary image data.")
import os
os.close(fnum)
cmd = os.environ.get('SCIPY_PIL_IMAGE_VIEWER', 'see')
status = os.system("%s %s" % (cmd, fname))
os.unlink(fname)
if status != 0:
raise RuntimeError('Could not execute image viewer.')
@numpy.deprecate(message="`imresize` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use ``skimage.transform.resize`` instead.")
def imresize(arr, size, interp='bilinear', mode=None):
"""
Resize an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
arr : ndarray
The array of image to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image (height, width).
interp : str, optional
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear',
'bicubic' or 'cubic').
mode : str, optional
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
If ``mode=None`` (the default), 2-D images will be treated like
``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays,
`mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively.
Returns
-------
imresize : ndarray
The resized array of image.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL.
"""
im = toimage(arr, mode=mode)
ts = type(size)
if issubdtype(ts, numpy.signedinteger):
percent = size / 100.0
size = tuple((array(im.size)*percent).astype(int))
elif issubdtype(type(size), numpy.floating):
size = tuple((array(im.size)*size).astype(int))
else:
size = (size[1], size[0])
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
imnew = im.resize(size, resample=func[interp])
return fromimage(imnew)
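# Illustrative usage of the three `size` conventions (not part of the
# original module), for a 200x300 input array:
#     imresize(arr, 50).shape        # int: percent of current size -> (100, 150)
#     imresize(arr, 0.25).shape      # float: fraction of current size -> (50, 75)
#     imresize(arr, (60, 90)).shape  # tuple: output (height, width) -> (60, 90)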
@numpy.deprecate(message="`imfilter` is deprecated in SciPy 1.0.0, "
"and will be removed in 1.2.0.\n"
"Use Pillow filtering functionality directly.")
def imfilter(arr, ftype):
"""
Simple filtering of an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
arr : ndarray
The array of Image in which the filter is to be applied.
ftype : str
The filter that has to be applied. Legal values are:
'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more',
'emboss', 'find_edges', 'smooth', 'smooth_more', 'sharpen'.
Returns
-------
imfilter : ndarray
The array with filter applied.
Raises
------
ValueError
*Unknown filter type.* If the filter you are trying
to apply is unsupported.
"""
_tdict = {'blur': ImageFilter.BLUR,
'contour': ImageFilter.CONTOUR,
'detail': ImageFilter.DETAIL,
'edge_enhance': ImageFilter.EDGE_ENHANCE,
'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE,
'emboss': ImageFilter.EMBOSS,
'find_edges': ImageFilter.FIND_EDGES,
'smooth': ImageFilter.SMOOTH,
'smooth_more': ImageFilter.SMOOTH_MORE,
'sharpen': ImageFilter.SHARPEN
}
im = toimage(arr)
if ftype not in _tdict:
raise ValueError("Unknown filter type.")
return fromimage(im.filter(_tdict[ftype]))
| mit |
toobaz/pandas | pandas/tests/io/test_packers.py | 2 | 32589 | import datetime
import glob
from io import BytesIO
import os
from warnings import catch_warnings, filterwarnings
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.errors import PerformanceWarning
import pandas
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
bdate_range,
date_range,
period_range,
)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
ensure_clean,
)
from pandas.io.packers import read_msgpack, to_msgpack
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
@pytest.fixture(scope="module")
def current_packers_data():
# our current version packers data
from pandas.tests.io.generate_legacy_storage_files import create_msgpack_data
return create_msgpack_data()
@pytest.fixture(scope="module")
def all_packers_data():
# all of our current version packers data
from pandas.tests.io.generate_legacy_storage_files import create_data
return create_data()
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert len(a) == len(b)
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if b.categories.inferred_type == "string":
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert a == b
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestPackers:
def setup_method(self, method):
self.path = "__{}__.msg".format(tm.rands(10))
def teardown_method(self, method):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
with open(p, "wb") as fh:
fh.write(s)
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A:
def __init__(self):
self.read = 0
msg = "Invalid file path or buffer object type: <class '{}'>"
invalid_path = os.path.join("nonexistent_dir", "df.msgpack")
with pytest.raises(ValueError, match=msg.format("NoneType")):
read_msgpack(path_or_buf=None)
with pytest.raises(ValueError, match=msg.format("dict")):
read_msgpack(path_or_buf={})
with pytest.raises(ValueError, match=msg.format(r".*\.A")):
read_msgpack(path_or_buf=A())
with pytest.raises(FileNotFoundError, match="does not exist"):
read_msgpack(path_or_buf=invalid_path)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_bool(self):
x = np.bool_(1)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
x = np.bool_(0)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, "complex128"):
pytest.skip("numpy can not handle complex128")
x = [np.float32(np.random.rand()) for i in range(5)] + [
np.complex128(np.random.rand() + 1j * np.random.rand()) for i in range(5)
]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + [
(np.random.rand() + 1j * np.random.rand()) for i in range(5)
]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_dict_float(self):
x = {"foo": 1.0, "bar": 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {"foo": 1.0 + 1.0j, "bar": 2.0 + 2.0j}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="complex value")
def test_dict_numpy_float(self):
x = {"foo": np.float32(1.0), "bar": np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {"foo": np.complex128(1.0 + 1.0j), "bar": np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="numpy complex128")
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ["float32", "float64"]:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
assert all(map(lambda x, y: x == y, x, x_rec)) and x.dtype == x_rec.dtype
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), "foo", np.bool_(1)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [
Timestamp("20130101"),
Timestamp("20130101", tz="US/Eastern"),
Timestamp("201301010501"),
]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_nat(self):
nat_rec = self.encode_decode(NaT)
assert NaT is nat_rec
def test_datetimes(self):
for i in [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15)),
]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_timedeltas(self):
for i in [
datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000),
]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_periods(self):
# 13463
for i in [Period("2010-09", "M"), Period("2014-Q1", "Q")]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_intervals(self):
# 19967
for i in [Interval(0, 1), Interval(0, 1, "left"), Interval(10, 25.0, "right")]:
i_rec = self.encode_decode(i)
assert i == i_rec
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestIndex(TestPackers):
def setup_method(self, method):
super().setup_method(method)
self.d = {
"string": tm.makeStringIndex(100),
"date": tm.makeDateIndex(100),
"int": tm.makeIntIndex(100),
"rng": tm.makeRangeIndex(100),
"float": tm.makeFloatIndex(100),
"empty": Index([]),
"tuple": Index(zip(["foo", "bar", "baz"], [1, 2, 3])),
"period": Index(period_range("2012-1-1", freq="M", periods=3)),
"date2": Index(date_range("2013-01-1", periods=10)),
"bdate": Index(bdate_range("2013-01-02", periods=10)),
"cat": tm.makeCategoricalIndex(100),
"interval": tm.makeIntervalIndex(100),
"timedelta": tm.makeTimedeltaIndex(100, "H"),
}
self.mi = {
"reg": MultiIndex.from_tuples(
[
("bar", "one"),
("baz", "two"),
("foo", "two"),
("qux", "one"),
("qux", "two"),
],
names=["first", "second"],
)
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp("20130101"), Timestamp("20130103")])
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index(
[Timestamp("20130101 9:00:00"), Timestamp("20130103 11:00:00")]
).tz_localize("US/Eastern")
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_categorical_index(self):
# GH15487
df = DataFrame(np.random.randn(10, 2))
df = df.astype({0: "category"}).set_index(0)
result = self.encode_decode(df)
tm.assert_frame_equal(result, df)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestSeries(TestPackers):
def setup_method(self, method):
super().setup_method(method)
self.d = {}
s = tm.makeStringSeries()
s.name = "string"
self.d["string"] = s
s = tm.makeObjectSeries()
s.name = "object"
self.d["object"] = s
s = Series(iNaT, dtype="M8[ns]", index=range(5))
self.d["date"] = s
data = {
"A": [0.0, 1.0, 2.0, 3.0, np.nan],
"B": [0, 1, 0, 1, 0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": date_range("1/1/2009", periods=5),
"E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
"F": [Timestamp("20130102", tz="US/Eastern")] * 2
+ [Timestamp("20130603", tz="CET")] * 3,
"G": [Timestamp("20130102", tz="US/Eastern")] * 5,
"H": Categorical([1, 2, 3, 4, 5]),
"I": Categorical([1, 2, 3, 4, 5], ordered=True),
"J": (np.bool_(1), 2, 3, 4, 5),
}
self.d["float"] = Series(data["A"])
self.d["int"] = Series(data["B"])
self.d["mixed"] = Series(data["E"])
self.d["dt_tz_mixed"] = Series(data["F"])
self.d["dt_tz"] = Series(data["G"])
self.d["cat_ordered"] = Series(data["H"])
self.d["cat_unordered"] = Series(data["I"])
self.d["numpy_bool_mixed"] = Series(data["J"])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestCategorical(TestPackers):
def setup_method(self, method):
super().setup_method(method)
self.d = {}
self.d["plain_str"] = Categorical(["a", "b", "c", "d", "e"])
self.d["plain_str_ordered"] = Categorical(
["a", "b", "c", "d", "e"], ordered=True
)
self.d["plain_int"] = Categorical([5, 6, 7, 8])
self.d["plain_int_ordered"] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
@pytest.mark.filterwarnings("ignore:msgpack:FutureWarning")
class TestNDFrame(TestPackers):
def setup_method(self, method):
super().setup_method(method)
data = {
"A": [0.0, 1.0, 2.0, 3.0, np.nan],
"B": [0, 1, 0, 1, 0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": date_range("1/1/2009", periods=5),
"E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
"F": [Timestamp("20130102", tz="US/Eastern")] * 5,
"G": [Timestamp("20130603", tz="CET")] * 5,
"H": Categorical(["a", "b", "c", "d", "e"]),
"I": Categorical(["a", "b", "c", "d", "e"], ordered=True),
}
self.frame = {
"float": DataFrame(dict(A=data["A"], B=Series(data["A"]) + 1)),
"int": DataFrame(dict(A=data["B"], B=Series(data["B"]) + 1)),
"mixed": DataFrame(data),
}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
packed_items = tuple(
[self.frame["float"], self.frame["float"].A, self.frame["float"].B, None]
)
l_rec = self.encode_decode(packed_items)
check_arbitrary(packed_items, l_rec)
# this is an oddity in that packed lists will be returned as tuples
packed_items = [
self.frame["float"],
self.frame["float"].A,
self.frame["float"].B,
None,
]
l_rec = self.encode_decode(packed_items)
assert isinstance(l_rec, tuple)
check_arbitrary(packed_items, l_rec)
def test_iterator(self):
packed_items = [
self.frame["float"],
self.frame["float"].A,
self.frame["float"].B,
None,
]
with ensure_clean(self.path) as path:
to_msgpack(path, *packed_items)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, packed_items[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range("1/1/2013", "1/3/2013"))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range("1/1/2013", "1/2/2013"))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=["a", "a"])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ["abc", np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
msg = r"msgpack sparse (series|frame) is not implemented"
with pytest.raises(NotImplementedError, match=msg):
self.encode_decode(obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal, check_series_type=True)
ss2 = s.to_sparse(kind="integer")
self._check_roundtrip(ss2, tm.assert_series_equal, check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal, check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.loc[3:5, 1:3] = np.nan
s.loc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal, check_frame_type=True)
ss2 = s.to_sparse(kind="integer")
self._check_roundtrip(ss2, tm.assert_frame_equal, check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal, check_frame_type=True)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestCompression(TestPackers):
"""See https://github.com/pandas-dev/pandas/pull/9783
"""
def setup_method(self, method):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super().setup_method(method)
data = {
"A": np.arange(1000, dtype=np.float64),
"B": np.arange(1000, dtype=np.int32),
"C": list(100 * "abcdefghij"),
"D": date_range(datetime.datetime(2015, 4, 1), periods=1000),
"E": [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
"float": DataFrame({k: data[k] for k in ["A", "A"]}),
"int": DataFrame({k: data[k] for k in ["B", "B"]}),
"mixed": DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
assert block.values.flags.writeable
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip("no zlib")
self._test_compression("zlib")
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip("no blosc")
self._test_compression("blosc")
def _test_compression_warns_when_decompress_caches(self, monkeypatch, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype("float64"): 1.0,
np.dtype("int32"): 1,
np.dtype("object"): "a",
np.dtype("datetime64[ns]"): np.timedelta64(1, "ns"),
np.dtype("timedelta64[ns]"): np.timedelta64(1, "ns"),
}
with monkeypatch.context() as m, tm.assert_produces_warning(
PerformanceWarning
) as ws:
m.setattr(compress_module, "decompress", decompress)
with catch_warnings():
filterwarnings("ignore", category=FutureWarning)
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
assert block.values.flags.writeable
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
assert str(w.message) == (
"copying data after decompressing; "
"this may mean that decompress is "
"caching its result"
)
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
assert buf == control_buf
def test_compression_warns_when_decompress_caches_zlib(self, monkeypatch):
if not _ZLIB_INSTALLED:
pytest.skip("no zlib")
self._test_compression_warns_when_decompress_caches(monkeypatch, "zlib")
def test_compression_warns_when_decompress_caches_blosc(self, monkeypatch):
if not _BLOSC_INSTALLED:
pytest.skip("no blosc")
self._test_compression_warns_when_decompress_caches(monkeypatch, "blosc")
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype="uint8")
with tm.assert_produces_warning(None):
with catch_warnings():
filterwarnings("ignore", category=FutureWarning)
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
assert empty_unpacked.flags.writeable
char = np.array([ord(b"a")], dtype="uint8")
with tm.assert_produces_warning(None):
with catch_warnings():
filterwarnings("ignore", category=FutureWarning)
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
assert char_unpacked.flags.writeable
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b"b")
# we compare the ord of bytes b'a' with unicode 'a' because they should
# always be the same (unless we were able to mutate the shared
# character singleton, in which case ord(b'a') == ord(b'b')).
assert ord(b"a") == ord("a")
tm.assert_numpy_array_equal(char_unpacked, np.array([ord(b"b")], dtype="uint8"))
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip("no zlib")
self._test_small_strings_no_warn("zlib")
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip("no blosc")
self._test_small_strings_no_warn("blosc")
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip("no blosc")
df1 = DataFrame({"A": list("abcd")})
df2 = DataFrame(df1, index=[1.0, 2.0, 3.0, 4.0])
assert 1 in self.encode_decode(df1["A"], compress="blosc")
assert 1.0 in self.encode_decode(df2["A"], compress="blosc")
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({"A": list("abcd")})
df2 = DataFrame(df1, index=[1.0, 2.0, 3.0, 4.0])
assert 1 in self.encode_decode(df1["A"], compress="zlib")
assert 1.0 in self.encode_decode(df2["A"], compress="zlib")
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip("no blosc")
if not self._SQLALCHEMY_INSTALLED:
pytest.skip("no sqlalchemy")
expected = DataFrame({"A": list("abcd")})
df = self.encode_decode(expected, compress="blosc")
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql("test", eng, if_exists="append")
result = pandas.read_sql_table("test", eng, index_col="index")
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
pytest.skip("no zlib")
if not self._SQLALCHEMY_INSTALLED:
pytest.skip("no sqlalchemy")
expected = DataFrame({"A": list("abcd")})
df = self.encode_decode(expected, compress="zlib")
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql("test", eng, if_exists="append")
result = pandas.read_sql_table("test", eng, index_col="index")
result.index.names = [None]
assert_frame_equal(expected, result)
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestEncoding(TestPackers):
def setup_method(self, method):
super().setup_method(method)
data = {
"A": ["\u2019"] * 1000,
"B": np.arange(1000, dtype=np.int32),
"C": list(100 * "abcdefghij"),
"D": date_range(datetime.datetime(2015, 4, 1), periods=1000),
"E": [datetime.timedelta(days=x) for x in range(1000)],
"G": [400] * 1000,
}
self.frame = {
"float": DataFrame({k: data[k] for k in ["A", "A"]}),
"int": DataFrame({k: data[k] for k in ["B", "B"]}),
"mixed": DataFrame(data),
}
self.utf_encodings = ["utf8", "utf16", "utf32"]
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in self.frame.values():
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in self.frame.values():
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding="utf8")
assert result == expected
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
files = glob.glob(
os.path.join(os.path.dirname(__file__), "data", "legacy_msgpack", "*", "*.msgpack")
)
@pytest.fixture(params=files)
def legacy_packer(request, datapath):
return datapath(request.param)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestMsgpack:
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
"""
minimum_structure = {
"series": ["float", "int", "mixed", "ts", "mi", "dup"],
"frame": ["float", "int", "mixed", "mi"],
"index": ["int", "date", "period"],
"mi": ["reg2"],
}
def check_min_structure(self, data, version):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
msg = '"{0}" not found in data["{1}"]'.format(kind, typ)
assert kind in data[typ], msg
def compare(self, current_data, all_data, vf, version):
data = read_msgpack(vf)
self.check_min_structure(data, version)
for typ, dv in data.items():
assert typ in all_data, "unpacked data contains " 'extra key "{0}"'.format(
typ
)
for dt, result in dv.items():
assert (
dt in current_data[typ]
), 'data["{0}"] contains extra ' 'key "{1}"'.format(typ, dt)
try:
expected = current_data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comp_method, None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
tm.assert_frame_equal(result, expected)
def test_msgpacks_legacy(
self, current_packers_data, all_packers_data, legacy_packer, datapath
):
version = os.path.basename(os.path.dirname(legacy_packer))
try:
with catch_warnings(record=True):
self.compare(
current_packers_data, all_packers_data, legacy_packer, version
)
except ImportError:
# blosc not installed
pass
def test_msgpack_period_freq(self):
# https://github.com/pandas-dev/pandas/issues/24135
s = Series(np.random.rand(5), index=date_range("20130101", periods=5))
r = read_msgpack(s.to_msgpack())
repr(r)
| bsd-3-clause |
xzh86/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
Breta01/handwriting-ocr | src/ocr/imgtransform.py | 1 | 1130 | # -*- coding: utf-8 -*-
"""
Functions for transforming and preprocessing images for training
"""
import numpy as np
import pandas as pd
import cv2
from scipy.ndimage.interpolation import map_coordinates
def coordinates_remap(image, factor_alpha, factor_sigma):
"""Transforming image using remaping coordinates."""
alpha = image.shape[1] * factor_alpha
sigma = image.shape[1] * factor_sigma
shape = image.shape
blur_size = int(4*sigma) | 1
dx = alpha * cv2.GaussianBlur((np.random.rand(*shape) * 2 - 1),
ksize=(blur_size, blur_size),
sigmaX=sigma)
dy = alpha * cv2.GaussianBlur((np.random.rand(*shape) * 2 - 1),
ksize=(blur_size, blur_size),
sigmaX=sigma)
x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
# TODO use cv2.remap(image, dx, dy, interpolation=cv2.INTER_LINEAR)
return np.array(map_coordinates(image, indices, order=1, mode='constant').reshape(shape)) | mit |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
'width' or 'height', the arrow size increases with the width and
height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
Specifies the color all parts of the barb except any flags. This
parameter is analagous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
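# A hedged sketch of updating an existing Barbs collection in place (B,
# ax and the *_new arrays are illustrative names):
#
#     B = ax.barbs(X, Y, U, V)
#     B.set_UVC(U_new, V_new)                          # new vector components
#     B.set_offsets(np.column_stack((x_new, y_new)))   # new barb locations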
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
#Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (i.e. >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* is an array of flags to easily tell if
a barb is empty (too low to plot any barbs/flags).
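For example, with the default increments (half=5, full=10, flag=50) a
magnitude of 65 decomposes as 50 + 10 + 5, i.e. one flag, one full barb,
a half barb, and an empty flag of False.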
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
*nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
respectively, the number of flags, number of barbs, flag for
half a barb, and flag for an empty barb, ostensibly obtained
from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft to top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
hemisphere).
This function returns a list of arrays of vertices, defining a polygon for
each of the wind barbs. These polygons have been rotated to properly
align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
#makes sense in a meteorological mode of thinking, where 0 degrees
#corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location(offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
#The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
#Add vertices for each barb. These really are lines, but it works
#well to add 3 vertices that basically pull the polygon out and
#back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first feature on the staff, it is traditionally
#offset from the end to make it easy to distinguish from a full
#barb
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
#Rotate the barb according to the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
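#A hedged usage sketch (an assumption about typical calling code, not part of
#this module): the length/pivot/sizes/fill_empty/flip knobs above are normally
#reached through the public barbs() call, e.g.:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.linspace(-5, 5, 5)
#   X, Y = np.meshgrid(x, x)
#   U, V = 15 * X, 15 * Y
#   fig, ax = plt.subplots()
#   ax.barbs(X, Y, U, V, length=7, pivot='middle',
#            sizes=dict(spacing=0.2, height=0.5, emptybarb=0.2),
#            fill_empty=True, flip_barb=False,
#            barb_increments=dict(half=5, full=10, flag=50))
#   plt.show()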
#Taken shamelessly from Quiver
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
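#Sketch (not part of the original code): the positional signatures accepted
#above mirror Quiver, i.e. barbs(U, V), barbs(U, V, C), barbs(X, Y, U, V) and
#barbs(X, Y, U, V, C). When X/Y are omitted, an index grid is generated,
#roughly equivalent to:
#
#   import numpy as np
#   U = np.ones((3, 4))
#   nc, nr = U.shape[0], U.shape[1]
#   X, Y = [np.ravel(g) for g in np.meshgrid(np.arange(nc), np.arange(nr))]
#   # X and Y now hold one entry per barb location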
def set_UVC(self, U, V, C=None):
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
Set the offsets for the barb polygons. This saves the offsets passed in
and masks them as appropriate for the existing U/V data. *offsets*
should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| gpl-3.0 |
microsoft/EconML | econml/sklearn_extensions/ensemble.py | 1 | 8978 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
""" Subsampled honest forest extension to scikit-learn's forest methods. Contains pieces of code from
scikit-learn's random forest implementation.
"""
from ..grf import RegressionForest
from ..utilities import deprecated
@deprecated("The SubsampledHonestForest class has been deprecated by the grf.RegressionForest class; "
"an upcoming release will remove support for the this class.")
def SubsampledHonestForest(n_estimators=100,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
subsample_fr='auto',
honest=True,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
"""
An implementation of a subsampled honest random forest regressor on top of an sklearn
regression tree. Implements subsampling and honesty as described in [3]_,
but uses a scikit-learn regression tree as a base. It provides confidence intervals based on ideas
described in [3]_ and [4]_.
Parameters
----------
n_estimators : integer, optional (default=100)
The total number of trees in the forest. The forest consists of
sqrt(n_estimators) sub-forests, where each sub-forest
contains sqrt(n_estimators) trees.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of splitting samples required to split an internal node.
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` splitting samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression. After construction the tree is also pruned
so that there are at least min_samples_leaf estimation samples on
each leaf.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
splitting samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided. After construction
the tree is pruned so that the fraction of the sum total weight
of the estimation samples contained in each leaf node is at
least min_weight_fraction_leaf
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of split samples, ``N_t`` is the number of
split samples at the current node, ``N_t_L`` is the number of split samples in the
left child, and ``N_t_R`` is the number of split samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
subsample_fr : float or 'auto', optional (default='auto')
The fraction of the half-samples that are used on each tree. Each tree
will be built on subsample_fr * n_samples/2 samples.
If 'auto', then the subsampling fraction is set to::
(n_samples/2)**(1-1/(2*n_features+2))/(n_samples/2)
which is sufficient to guarantee asymptotically valid inference.
honest : boolean, optional (default=True)
Whether to use honest trees, i.e. half of the samples are used for
creating the tree structure and the other half for the estimation at
the leaves. If False, then all samples are used for both parts.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
subsample_fr_ : float
The chosen subsample ratio. Each tree was trained on ``subsample_fr_ * n_samples / 2``
data points.
References
----------
.. [3] S. Athey, S. Wager, "Estimation and Inference of Heterogeneous Treatment Effects using Random Forests",
Journal of the American Statistical Association 113.523 (2018): 1228-1242.
.. [4] S. Athey, J. Tibshirani, and S. Wager, "Generalized random forests",
The Annals of Statistics, 47(2), 1148-1178, 2019.
"""
return RegressionForest(n_estimators=n_estimators,
criterion=criterion,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
min_impurity_decrease=min_impurity_decrease,
max_samples=.45 if subsample_fr == 'auto' else subsample_fr / 2,
honest=honest,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
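# A hedged, self-contained sketch (not part of this module) of what the 'auto'
# subsampling fraction documented above works out to, and how it maps onto
# RegressionForest's max_samples in the shim:
#
#   n_samples, n_features = 1000, 4
#   subsample_fr = (n_samples / 2) ** (1 - 1 / (2 * n_features + 2)) / (n_samples / 2)
#   # subsample_fr ~= 0.54, so each tree sees roughly 0.54 * n_samples / 2 ~= 269 points;
#   # the wrapper above passes max_samples = subsample_fr / 2 (or the fixed 0.45 for 'auto').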
| mit |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/staging/models/rough/nmt_gpu/nmt.py | 6 | 46512 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import time
import tensorflow as tf
from mlperf_compliance import mlperf_log
import estimator
from utils import evaluation_utils
from utils import iterator_utils
from utils import misc_utils as utils
from utils import vocab_utils
from variable_mgr import constants
utils.check_tensorflow_version()
FLAGS = None
# LINT.IfChange
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="gnmt",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="normed_bahdanau",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="gnmt_v2",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether to use attention as
the cell output at each timestep.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--num_train_steps", type=int, default=100000, help="Num steps to train.")
parser.add_argument(
"--max_train_epochs", type=int, default=8, help="Max number of epochs.")
parser.add_argument("--num_examples_per_epoch", type=int, default=4068191,
help="Number of examples in one epoch")
parser.add_argument(
"--target_bleu", type=float, default=22.0, help="Target bleu.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="",
help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--dev_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Dev prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--output_dir", type=str, default="",
help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=50,
help="Max length of src sequences during training.")
parser.add_argument(
"--tgt_max_len",
type=int,
default=50,
help="Max length of tgt sequences during training.")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference.")
parser.add_argument("--tgt_max_len_infer", type=int, default=80,
help="""\
Max length of tgt sequences during inference. Also used to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=1.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=5,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument("--max_train", type=int, default=0,
help="Limit on the size of training data (0: no limit).")
parser.add_argument(
"--num_buckets",
type=int,
default=1,
help="Put data into similar-length buckets.")
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
Whether to split each word or bpe token into characters, and then
generate the word-level representation from the character
representation.
""")
# Misc
parser.add_argument(
"--save_checkpoints_steps", type=int, default=1000,
help="save_checkpoints_steps")
parser.add_argument(
"--num_gpus", type=int, default=1, help="Number of gpus in each worker.")
parser.add_argument(
"--log_device_placement",
type="bool",
nargs="?",
const=True,
default=True,
help="Debug GPU allocation.")
parser.add_argument("--steps_per_external_eval", type=int, default=None,
help="""\
How many training steps to do per external evaluation. Automatically set
based on data if None.\
""")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=1,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
const=True, default=False,
help="Override loaded hparams with values specified")
parser.add_argument("--num_keep_ckpts", type=int, default=5,
help="Max number of checkpoints to keep.")
parser.add_argument("--avg_ckpts", type="bool", nargs="?",
const=True, default=False, help=("""\
Average the last N checkpoints for external evaluation.
N can be controlled by setting --num_keep_ckpts.\
"""))
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument("--inference_input_file", type=str, default=None,
help="Set to the text to decode.")
parser.add_argument("--inference_list", type=str, default=None,
help=("A comma-separated list of sentence indices "
"(0-based) to decode."))
parser.add_argument(
"--infer_batch_size",
type=int,
default=64,
help="Batch size for inference mode.")
parser.add_argument("--detokenizer_file", type=str,
default="",
help=("""Detokenizer script file."""))
parser.add_argument("--use_borg", type="bool", default=False)
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
parser.add_argument("--sampling_temperature", type=float,
default=0.0,
help=("""\
Softmax sampling temperature for inference decoding, 0.0 means greedy
decoding. This option is ignored when using beam search.\
"""))
parser.add_argument("--num_translations_per_input", type=int, default=1,
help=("""\
Number of translations generated for each sentence. This is only used for
inference.\
"""))
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).")
parser.add_argument("--num_inter_threads", type=int, default=0,
help="number of inter_op_parallelism_threads")
parser.add_argument("--num_intra_threads", type=int, default=0,
help="number of intra_op_parallelism_threads")
# Fp16
parser.add_argument("--use_fp16", type="bool", default=False,
help="use_fp16 for training and inference")
parser.add_argument(
"--fp16_loss_scale",
type=float,
default=128,
help="If fp16 is enabled, the loss is multiplied by this amount "
"right before gradients are computed, then each gradient "
"is divided by this amount. Mathematically, this has no "
"effect, but it helps avoid fp16 underflow. Set to 1 to "
"effectively disable.")
parser.add_argument(
"--enable_auto_loss_scale",
type="bool",
default=False,
help="If True and use_fp16 is True, automatically adjust the "
"loss scale during training.")
parser.add_argument(
"--fp16_inc_loss_scale_every_n",
type=int,
default=1000,
help="If fp16 is enabled and enable_auto_loss_scale is "
"True, increase the loss scale every n steps.")
parser.add_argument(
"--check_tower_loss_numerics",
type="bool",
default=False, # Set to false for xla.compile()
help="whether to check tower loss numerics")
parser.add_argument(
"--use_fp32_batch_matmul",
type="bool",
default=True,
help="Whether to use fp32 batch matmul")
# Performance
# XLA
parser.add_argument(
"--force_inputs_padding",
type="bool",
default=False,
help="Force padding input batch to src_max_len and tgt_max_len")
parser.add_argument(
"--use_xla",
type="bool",
default=False,
help="Use xla to compile a few selected locations, mostly Defuns.")
parser.add_argument(
"--xla_compile",
type="bool",
default=False,
help="Use xla.compile() for each tower's fwd and bak pass.")
parser.add_argument(
"--use_autojit_xla",
type="bool",
default=False,
help="Use auto jit xla.")
# GPU knobs
parser.add_argument(
"--use_pintohost_optimizer",
type="bool",
default=False,
help="whether to use PinToHost optimizer")
parser.add_argument(
"--use_cudnn_lstm",
type="bool",
default=False,
help="whether to use cudnn_lstm for encoder, non residual layers")
parser.add_argument(
"--use_loose_bidi_cudnn_lstm",
type="bool",
default=False,
help="whether to use loose bidi cudnn_lstm")
parser.add_argument(
"--use_fused_lstm",
type="bool",
default=False,
help="whether to use fused lstm and variant. If enabled, training will "
"use LSTMBlockFusedCell, infer will use LSTMBlockCell when appropriate.")
parser.add_argument(
"--use_fused_lstm_dec",
type="bool",
default=False,
help="whether to use fused lstm for decoder (training only).")
parser.add_argument(
"--gpu_indices",
type=str,
default="",
help="Indices of worker GPUs in ring order")
parser.add_argument(
"--gpu_thread_mode",
type=str,
default="global",
help="Methods to assign GPU host work to threads. "
"global: all GPUs and CPUs share the same global threads; "
"gpu_private: a private threadpool for each GPU; "
"gpu_shared: all GPUs share the same threadpool.")
parser.add_argument(
"--per_gpu_thread_count",
type=int,
default=0,
help="The number of threads to use for GPU. Only valid when "
"gpu_thread_mode is not global.")
parser.add_argument(
"--sync_on_finish",
type="bool",
default=False,
help="Enable/disable whether the devices are synced after each "
"step.")
parser.add_argument(
"--force_gpu_compatible",
type="bool",
default=False,
help="whether to enable force_gpu_compatible in GPU_Options")
# Graph knobs
parser.add_argument("--parallel_iterations", type=int, default=10,
help="number of parallel iterations in dynamic_rnn")
parser.add_argument("--use_dist_strategy", type="bool", default=False,
help="whether to use distribution strategy")
parser.add_argument(
"--hierarchical_copy",
type="bool",
default=False,
help="Use hierarchical copies. Currently only optimized for "
"use on a DGX-1 with 8 GPUs and may perform poorly on "
"other hardware. Requires --num_gpus > 1, and only "
"recommended when --num_gpus=8")
parser.add_argument(
"--network_topology",
type=constants.NetworkTopology,
default=constants.NetworkTopology.DGX1,
choices=list(constants.NetworkTopology))
parser.add_argument(
"--enable_layout_optimizer",
type="bool",
default=False,
help="whether to enable layout optimizer")
parser.add_argument(
"--use_block_lstm",
type="bool",
default=False,
help="whether to use block lstm")
parser.add_argument(
"--use_defun",
type="bool",
default=False,
help="whether to use Defun")
# Gradient tricks
parser.add_argument(
"--gradient_repacking",
type=int,
default=0,
help="Use gradient repacking. It"
"currently only works with replicated mode. At the end of"
"of each step, it repacks the gradients for more efficient"
"cross-device transportation. A non-zero value specifies"
"the number of split packs that will be formed.")
parser.add_argument(
"--compact_gradient_transfer",
type="bool",
default=True,
help="Compact gradient as much as possible for cross-device transfer and "
"aggregation.")
parser.add_argument(
"--all_reduce_spec",
type=str,
default="nccl",
help="A specification of the all_reduce algorithm to be used "
"for reducing gradients. For more details, see "
"parse_all_reduce_spec in variable_mgr.py. An "
"all_reduce_spec has BNF form:\n"
"int ::= positive whole number\n"
"g_int ::= int[KkMGT]?\n"
"alg_spec ::= alg | alg#int\n"
"range_spec ::= alg_spec | alg_spec/alg_spec\n"
"spec ::= range_spec | range_spec:g_int:range_spec\n"
"NOTE: not all syntactically correct constructs are "
"supported.\n\n"
"Examples:\n "
"\"xring\" == use one global ring reduction for all "
"tensors\n"
"\"pscpu\" == use CPU at worker 0 to reduce all tensors\n"
"\"nccl\" == use NCCL to locally reduce all tensors. "
"Limited to 1 worker.\n"
"\"nccl/xring\" == locally (to one worker) reduce values "
"using NCCL then ring reduce across workers.\n"
"\"pscpu:32k:xring\" == use pscpu algorithm for tensors of "
"size up to 32kB, then xring for larger tensors.")
parser.add_argument(
"--agg_small_grads_max_bytes",
type=int,
default=0,
help="If > 0, try to aggregate tensors of less than this "
"number of bytes prior to all-reduce.")
parser.add_argument(
"--agg_small_grads_max_group",
type=int,
default=10,
help="When aggregating small tensors for all-reduce do not "
"aggregate more than this many into one new tensor.")
parser.add_argument(
"--allreduce_merge_scope",
type=int,
default=1,
help="Establish a name scope around this many "
"gradients prior to creating the all-reduce operations. "
"It may affect the ability of the backend to merge "
"parallel ops.")
# Other knobs
parser.add_argument(
"--local_parameter_device",
type=str,
default="gpu",
help="Device to use as parameter server: cpu or gpu. For "
"distributed training, it can affect where caching of "
"variables happens.")
parser.add_argument(
"--autotune_threshold",
type=int,
default=0,
help="The autotune threshold for the models")
parser.add_argument(
"--datasets_num_private_threads",
type=int,
default=None,
help="Number of threads for a private threadpool created for "
"all datasets computation. By default, we pick an "
"appropriate number. If set to 0, we use the default "
"tf-Compute threads for dataset operations.")
parser.add_argument(
"--winograd_nonfused",
type="bool",
default=True,
help="Enable/disable using the Winograd non-fused algorithms.")
parser.add_argument(
"--batchnorm_persistent",
type="bool",
default=True,
help="Enable/disable using the CUDNN_BATCHNORM_SPATIAL_PERSISTENT "
"mode for batchnorm.")
parser.add_argument(
"--device",
type=str,
default="gpu",
help="Device to use for computation: cpu or gpu")
parser.add_argument(
"--allow_growth",
type="bool",
default=False,
help="whether to enable allow_growth in GPU_Options")
parser.add_argument(
"--use_resource_vars",
type="bool",
default=False,
help="Use resource variables instead of normal variables. "
"Resource variables are slower, but this option is useful "
"for debugging their performance.")
# Performance tuning specific to MKL.
parser.add_argument(
"--mkl",
type="bool",
default=False,
help="If true, set MKL environment variables.")
parser.add_argument(
"--kmp_blocktime",
type=int,
default=30,
help="The time, in milliseconds, that a thread should wait, "
"after completing the execution of a parallel region, "
"before sleeping")
parser.add_argument(
"--kmp_affinity",
type=str,
default="granularity=fine,verbose,compact,1,0",
help="Restricts execution of certain threads (virtual execution "
"units) to a subset of the physical processing units in a "
"multiprocessor computer.")
parser.add_argument(
"--kmp_settings", type=int, default=1,
help="If set to 1, MKL settings will be printed.")
# Debug
parser.add_argument("--debug", type="bool", default=False,
help="Debug train and eval")
parser.add_argument("--show_metrics", type="bool", default=True,
help="whether to show detailed metrics")
parser.add_argument("--build_graph_only", type="bool", default=False,
help="whehter or not just building the graph")
parser.add_argument("--clip_grads", type="bool", default=True,
help="whether to clip gradients")
parser.add_argument("--profile", type="bool", default=False,
help="If generate profile")
parser.add_argument("--profile_save_steps", type=int, default=10,
help="Save timeline every N steps.")
# TPU
parser.add_argument("--use_dynamic_rnn", type="bool", default=True)
parser.add_argument("--master", type=str, default="")
parser.add_argument("--use_synthetic_data", type="bool", default=False)
parser.add_argument(
"--iterations_per_loop",
type=int,
default=100,
help="the number of iterations to run on TPU before returning to host")
parser.add_argument(
"--mode", type=str, default="train_and_eval",
choices=["train", "train_and_eval", "infer"])
parser.add_argument(
"--run_name",
type=str,
default="",
help=
"if set, load ckpt from /gs://ij-d/home/mlperf-nmt/'run_name'"
)
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=os.path.join(flags.data_dir, flags.train_prefix),
dev_prefix=os.path.join(flags.data_dir, flags.dev_prefix),
test_prefix=os.path.join(flags.data_dir, flags.test_prefix),
vocab_prefix=os.path.join(flags.data_dir, flags.vocab_prefix),
embed_prefix=flags.embed_prefix,
output_dir=flags.output_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
num_train_steps=flags.num_train_steps,
max_train_epochs=flags.max_train_epochs,
num_examples_per_epoch=flags.num_examples_per_epoch,
target_bleu=flags.target_bleu,
label_smoothing=flags.label_smoothing,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
# Data constraints
num_buckets=flags.num_buckets,
max_train=flags.max_train,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
detokenizer_file=flags.detokenizer_file,
use_borg=flags.use_borg,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
sampling_temperature=flags.sampling_temperature,
num_translations_per_input=flags.num_translations_per_input,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
save_checkpoints_steps=flags.save_checkpoints_steps,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
steps_per_external_eval=flags.steps_per_external_eval,
share_vocab=flags.share_vocab,
log_device_placement=flags.log_device_placement,
random_seed=flags.random_seed,
override_loaded_hparams=flags.override_loaded_hparams,
num_keep_ckpts=flags.num_keep_ckpts,
avg_ckpts=flags.avg_ckpts,
language_model=flags.language_model,
num_intra_threads=flags.num_intra_threads,
num_inter_threads=flags.num_inter_threads,
# Fp16
use_fp16=flags.use_fp16,
fp16_loss_scale=flags.fp16_loss_scale,
enable_auto_loss_scale=flags.enable_auto_loss_scale,
fp16_inc_loss_scale_every_n=flags.fp16_inc_loss_scale_every_n,
check_tower_loss_numerics=flags.check_tower_loss_numerics,
use_fp32_batch_matmul=flags.use_fp32_batch_matmul,
# Performance
# GPU knbs
force_inputs_padding=flags.force_inputs_padding,
use_xla=flags.use_xla,
xla_compile=flags.xla_compile,
use_autojit_xla=flags.use_autojit_xla,
use_pintohost_optimizer=flags.use_pintohost_optimizer,
use_cudnn_lstm=flags.use_cudnn_lstm,
use_loose_bidi_cudnn_lstm=flags.use_loose_bidi_cudnn_lstm,
use_fused_lstm=flags.use_fused_lstm,
use_fused_lstm_dec=flags.use_fused_lstm_dec,
gpu_indices=flags.gpu_indices,
gpu_thread_mode=flags.gpu_thread_mode,
per_gpu_thread_count=flags.per_gpu_thread_count,
sync_on_finish=flags.sync_on_finish,
force_gpu_compatible=flags.force_gpu_compatible,
# Graph knobs
parallel_iterations=flags.parallel_iterations,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_dist_strategy=flags.use_dist_strategy,
hierarchical_copy=flags.hierarchical_copy,
network_topology=flags.network_topology,
enable_layout_optimizer=flags.enable_layout_optimizer,
use_block_lstm=flags.use_block_lstm,
# Grad tricks
gradient_repacking=flags.gradient_repacking,
compact_gradient_transfer=flags.compact_gradient_transfer,
all_reduce_spec=flags.all_reduce_spec,
agg_small_grads_max_bytes=flags.agg_small_grads_max_bytes,
agg_small_grads_max_group=flags.agg_small_grads_max_group,
allreduce_merge_scope=flags.allreduce_merge_scope,
# Other knobs
local_parameter_device=("cpu" if flags.num_gpus ==0
else flags.local_parameter_device),
autotune_threshold=flags.autotune_threshold,
datasets_num_private_threads=flags.datasets_num_private_threads,
winograd_nonfused=flags.winograd_nonfused,
batchnorm_persistent=flags.batchnorm_persistent,
device=flags.device,
allow_growth=flags.allow_growth,
use_resource_vars=flags.use_resource_vars,
mkl=flags.mkl,
kmp_blocktime=flags.kmp_blocktime,
kmp_affinity=flags.kmp_affinity,
kmp_settings=flags.kmp_settings,
# Debug
debug=flags.debug,
build_graph_only=flags.build_graph_only,
clip_grads=flags.clip_grads,
profile=flags.profile,
profile_save_steps=flags.profile_save_steps,
show_metrics=flags.show_metrics,
# TPU
master=flags.master,
use_synthetic_data=flags.use_synthetic_data,
iterations_per_loop=flags.iterations_per_loop,
mode=flags.mode,
run_name=flags.run_name)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
raise ValueError("sampling_temperature must greater than 0.0 when using"
"sample decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have a residual connection, because its input is
# the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
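# Worked example (a sketch, not from the original code): with the defaults
# registered above (encoder_type="gnmt", num_layers=4, residual=True) this
# yields num_encoder_residual_layers = 4 - 2 = 2 and, because encoder and
# decoder depths match, num_decoder_residual_layers = 2 as well, so residual
# connections are added on only two layers of each stack.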
# Language modeling
if hparams.language_model:
hparams.attention = ""
hparams.attention_architecture = ""
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
mlperf_log.gnmt_print(key=mlperf_log.PREPROC_VOCAB_SIZE,
value={"src": src_vocab_size, "tgt": tgt_vocab_size})
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if hparams.embed_prefix:
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
metric = "bleu"
best_metric_dir = os.path.join(hparams.output_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
if hparams.avg_ckpts:
best_metric_dir = os.path.join(hparams.output_dir, "avg_best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "avg_best_" + metric, 0, update=False)
_add_argument(hparams, "avg_best_" + metric + "_dir", best_metric_dir)
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from output_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Job
jobid = flags.jobid
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = flags.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# Train or Evaluation
estimator_fn(hparams)
return hparams
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.use_fp16 and FLAGS.use_dist_strategy:
raise ValueError("use_fp16 and use_dist_strategy aren't compatible")
# Set up hacky envvars.
# Hack that affects Defun in attention_wrapper.py
active_xla_option_nums = np.sum([FLAGS.use_xla, FLAGS.use_autojit_xla,
FLAGS.xla_compile])
if active_xla_option_nums > 1:
raise ValueError(
"Only one of use_xla, xla_compile, use_autojit_xla can be set")
os.environ["use_xla"] = str(FLAGS.use_xla).lower()
if FLAGS.use_xla:
os.environ["use_defun"] = str(True).lower()
else:
os.environ["use_defun"] = str(FLAGS.use_defun).lower()
utils.print_out("use_defun is %s for attention" % os.environ["use_defun"])
# TODO(jamesqin): retire this config after Cuda9.1
os.environ["use_fp32_batch_matmul"] = ("true" if FLAGS.use_fp32_batch_matmul
else "false")
os.environ["xla_compile"] = "true" if FLAGS.xla_compile else "false"
os.environ["force_inputs_padding"] = (
"true" if FLAGS.force_inputs_padding else "false")
if FLAGS.mode == "train":
utils.print_out("Running training mode.")
FLAGS.num_buckets = 5
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "infer":
utils.print_out("Running inference mode.")
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
default_hparams = create_hparams(FLAGS)
default_hparams.num_buckets = 1
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("infer_hparams:")
utils.print_hparams(hparams)
# Run evaluation when there's a new checkpoint
for i, ckpt in enumerate(
evaluation_utils.get_all_checkpoints(FLAGS.output_dir)):
tf.logging.info("Starting to evaluate...")
eval_start = time.time()
bleu_score = estimator.eval_fn(hparams, ckpt)
eval_end = time.time()
utils.print_out("eval time for %d th ckpt: %.2f mins" %
(i, (eval_end - eval_start) / 60.), f=sys.stderr)
else:
assert FLAGS.mode == "train_and_eval"
utils.print_out("Running train and eval mode.")
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
default_hparams = create_hparams(FLAGS)
default_hparams.num_buckets = 5
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("training hparams:")
utils.print_hparams(hparams)
with tf.gfile.GFile(os.path.join(output_dir, "train_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(hparams) + "\n")
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
infer_hparams = tf.contrib.training.HParams(**hparams.values())
infer_hparams.num_buckets = 1
utils.print_out("infer_hparams:")
utils.print_hparams(infer_hparams)
with tf.gfile.GFile(os.path.join(output_dir, "infer_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(infer_hparams) + "\n")
epochs = 0
should_stop = epochs >= FLAGS.max_train_epochs
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_LOOP)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_TARGET, value=hparams.target_bleu)
while not should_stop:
utils.print_out("Starting epoch %d" % epochs)
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_EPOCH, value=epochs)
mlperf_log.gnmt_print(
key=mlperf_log.INPUT_SIZE,
value=iterator_utils.get_effective_train_epoch_size(hparams))
mlperf_log.gnmt_print(
key=mlperf_log.TRAIN_CHECKPOINT,
value=("Under " + hparams.output_dir))
try:
train_start = time.time()
estimator.train_fn(hparams)
except tf.errors.OutOfRangeError:
utils.print_out("training hits OutOfRangeError", f=sys.stderr)
train_end = time.time()
utils.print_out("training time for epoch %d: %.2f mins" %
(epochs, (train_end - train_start) / 60.), f=sys.stderr)
# This is probably sub-optimal, doing eval per-epoch
mlperf_log.gnmt_print(key=mlperf_log.EVAL_START)
eval_start = time.time()
bleu_score = estimator.eval_fn(infer_hparams)
eval_end = time.time()
utils.print_out("eval time for epoch %d: %.2f mins" %
(epochs, (eval_end - eval_start) / 60.), f=sys.stderr)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_ACCURACY,
value={"epoch": epochs, "value": bleu_score})
mlperf_log.gnmt_print(key=mlperf_log.EVAL_STOP, value=epochs)
if FLAGS.debug or bleu_score > FLAGS.target_bleu:
should_stop = True
utils.print_out(
"Stop job since target bleu is reached at epoch %d ." % epochs,
f=sys.stderr)
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": True})
if epochs >= FLAGS.max_train_epochs:
should_stop = True
utils.print_out("Stop job since max_train_epochs is reached.",
f=sys.stderr)
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": False})
epochs += 1
mlperf_log.gnmt_print(key=mlperf_log.RUN_FINAL)
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
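# A hedged usage sketch; the paths below are placeholders, not taken from the
# original repo. Every flag shown is registered in add_arguments() above, and
# anything omitted falls back to the GNMT-style defaults defined there:
#
#   python nmt.py \
#     --data_dir=/path/to/wmt16_de_en \
#     --output_dir=/path/to/output \
#     --mode=train_and_eval \
#     --num_gpus=1 --batch_size=128 --target_bleu=22.0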
| apache-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/util/testing.py | 9 | 72423 | from __future__ import division
# pylint: disable-msg=W0402
import random
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import unittest
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.common import (is_sequence, array_equivalent, is_list_like, is_number,
is_datetimelike_v_numeric, is_datetimelike_v_object,
pprint_thing, take_1d,
needs_i8_conversion)
import pandas.compat as compat
from pandas.compat import(
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit
)
from pandas.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
Index, MultiIndex, Series, DataFrame, Panel, Panel4D)
from pandas.util.decorators import deprecate
from pandas import _testing
from pandas.io.common import urlopen
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE','None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', DeprecationWarning)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE','None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', DeprecationWarning)
set_testing_mode()
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
pd.set_option('chained_assignment', 'raise')
@classmethod
def tearDownClass(cls):
pass
def reset_display_options(self):
# reset the display options
pd.reset_option('^display.', silent=True)
def round_trip_pickle(self, obj, path=None):
if path is None:
path = u('__%s__.pickle' % rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
# https://docs.python.org/3/library/unittest.html#deprecated-aliases
def assertEquals(self, *args, **kwargs):
return deprecate('assertEquals', self.assertEqual)(*args, **kwargs)
def assertNotEquals(self, *args, **kwargs):
return deprecate('assertNotEquals', self.assertNotEqual)(*args, **kwargs)
def assert_(self, *args, **kwargs):
return deprecate('assert_', self.assertTrue)(*args, **kwargs)
def assertAlmostEquals(self, *args, **kwargs):
return deprecate('assertAlmostEquals', self.assertAlmostEqual)(*args, **kwargs)
def assertNotAlmostEquals(self, *args, **kwargs):
return deprecate('assertNotAlmostEquals', self.assertNotAlmostEqual)(*args, **kwargs)
# NOTE: don't pass an NDFrame or index to this function - may not handle it
# well.
assert_almost_equal = _testing.assert_almost_equal
assert_dict_equal = _testing.assert_dict_equal
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
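# A sketch of typical usage (assuming this module is imported as
# pandas.util.testing, which is how the rest of the test suite consumes it):
#
#   import pandas as pd
#   import pandas.util.testing as tm
#   strs = tm.rands_array(nchars=10, size=5)   # five random 10-character strings
#   idx = pd.Index(tm.rands_array(8, 100))     # handy for building fake string indexes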
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(choice(RANDU_CHARS, nchars))
def choice(x, size=10):
"""sample with replacement; uniform over the input"""
try:
return np.random.choice(x, size=size)
except AttributeError:
return np.random.randint(len(x), size=size).choose(x)
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
import nose
if is_platform_32bit():
raise nose.SkipTest("skipping for 32 bit")
def mplskip(cls):
"""Skip a TestCase instance if matplotlib isn't installed"""
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use("Agg", warn=False)
except ImportError:
import nose
raise nose.SkipTest("matplotlib not installed")
cls.setUpClass = setUpClass
return cls
def _skip_if_mpl_1_5():
import matplotlib
v = matplotlib.__version__
if v > LooseVersion('1.4.3') or v[0] == '0':
import nose
raise nose.SkipTest("matplotlib 1.5")
def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
import nose
raise nose.SkipTest("no scipy.stats module")
try:
import scipy.interpolate
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate missing')
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
import nose
raise nose.SkipTest("pytz not installed")
def _skip_if_no_dateutil():
try:
import dateutil
except ImportError:
import nose
raise nose.SkipTest("dateutil not installed")
def _skip_if_windows_python_3():
if compat.PY3 and is_platform_windows():
import nose
raise nose.SkipTest("not used on python 3/win32")
def _skip_if_windows():
if is_platform_windows():
import nose
raise nose.SkipTest("Running on Windows")
def _skip_if_no_cday():
from pandas.core.datetools import cday
if cday is None:
import nose
raise nose.SkipTest("CustomBusinessDay not available.")
def _skip_if_python26():
if sys.version_info[:2] == (2, 6):
import nose
raise nose.SkipTest("skipping on python2.6")
def _skip_if_no_pathlib():
try:
from pathlib import Path
except ImportError:
import nose
raise nose.SkipTest("pathlib not available")
def _skip_if_no_localpath():
try:
from py.path import local as LocalPath
except ImportError:
import nose
raise nose.SkipTest("py.path not installed")
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
and it's >= 1.0,
as we don't match the nansum/nanprod behavior for all-nan
ops, see GH9422
"""
if method not in ['sum','prod']:
return False
try:
import bottleneck as bn
return bn.__version__ >= LooseVersion('1.0')
except ImportError:
return False
def skip_if_no_ne(engine='numexpr'):
import nose
_USE_NUMEXPR = pd.computation.expressions._USE_NUMEXPR
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
raise nose.SkipTest("numexpr not installed")
if not _USE_NUMEXPR:
raise nose.SkipTest("numexpr disabled")
if ne.__version__ < LooseVersion('2.0'):
raise nose.SkipTest("numexpr version too low: "
"%s" % ne.__version__)
#------------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs): # shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
        # raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if compat.PY3:
out_locales.append(str(x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
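# Illustrative sketch of set_locale. The locale string is an assumption and may
# not be installed on a given system; in that case locale.Error is raised and
# the global locale is left untouched.
def _example_set_locale():
    try:
        with set_locale('en_US.UTF-8') as normalized:
            return normalized          # normalized locale name while inside the block
    except locale.Error:
        return None                    # requested locale not available on this system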
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
    except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
#------------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
#------------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import nose
raise nose.SkipTest('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: %d (file: %s)" %
(fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: %s" % e)
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
#------------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_equal(a, b, msg=""):
"""asserts that a equals b, like nose's assert_equal, but allows custom message to start.
Passes a and b to format string as well. So you can use '{0}' and '{1}' to display a and b.
Examples
--------
>>> assert_equal(2, 2, "apples")
>>> assert_equal(5.2, 1.2, "{0} was really a dead parrot")
Traceback (most recent call last):
...
AssertionError: 5.2 was really a dead parrot: 5.2 != 1.2
"""
assert a == b, "%s: %r != %r" % (msg.format(a,b), a, b)
def assert_index_equal(left, right, exact=False, check_names=True,
check_less_precise=False, check_exact=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool, default False
Whether to check the Index class, dtype and inferred_type are identical.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
check_exact : bool, default True
Whether to compare number exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
if type(l) != type(r):
msg = '{0} classes are different'.format(obj)
raise_assert_detail(obj, msg, l, r)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assertIn(r.inferred_type, ('string', 'unicode'))
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._simple_new(filled, index.names[level],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
return values
# instance validation
assertIsInstance(left, Index, '[index] ')
assertIsInstance(right, Index, '[index] ')
# class / dtype comparison
_check_types(left, right)
# level comparison
if left.nlevels != right.nlevels:
raise_assert_detail(obj, '{0} levels are different'.format(obj),
'{0}, {1}'.format(left.nlevels, left),
'{0}, {1}'.format(right.nlevels, right))
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, '{0} length are different'.format(obj),
'{0}, {1}'.format(len(left), left),
'{0}, {1}'.format(len(right), right))
    # MultiIndex special comparison for more helpful error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{0}]'.format(level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = '{0} values are different ({1} %)'.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
result = left_attr == right_attr
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr),
left_attr, right_attr)
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
return assert_almost_equal(seq, np.sort(np.array(seq)))
def assertIs(first, second, msg=''):
"""Checks that 'first' is 'second'"""
a, b = first, second
assert a is b, "%s: %r is not %r" % (msg.format(a, b), a, b)
def assertIsNot(first, second, msg=''):
"""Checks that 'first' is not 'second'"""
a, b = first, second
assert a is not b, "%s: %r is %r" % (msg.format(a, b), a, b)
def assertIn(first, second, msg=''):
"""Checks that 'first' is in 'second'"""
a, b = first, second
assert a in b, "%s: %r is not in %r" % (msg.format(a, b), a, b)
def assertNotIn(first, second, msg=''):
"""Checks that 'first' is not in 'second'"""
a, b = first, second
assert a not in b, "%s: %r is in %r" % (msg.format(a, b), a, b)
def assertIsNone(expr, msg=''):
"""Checks that 'expr' is None"""
return assertIs(expr, None, msg)
def assertIsNotNone(expr, msg=''):
"""Checks that 'expr' is not None"""
return assertIsNot(expr, None, msg)
def assertIsInstance(obj, cls, msg=''):
"""Test that obj is an instance of cls
(which can be a class or a tuple of classes,
as supported by isinstance())."""
assert isinstance(obj, cls), (
"%sExpected object to be of type %r, found %r instead" % (
msg, cls, type(obj)))
def assert_isinstance(obj, class_type_or_tuple, msg=''):
return deprecate('assert_isinstance', assertIsInstance)(obj, class_type_or_tuple, msg=msg)
def assertNotIsInstance(obj, cls, msg=''):
"""Test that obj is not an instance of cls
(which can be a class or a tuple of classes,
as supported by isinstance())."""
assert not isinstance(obj, cls), (
"%sExpected object to be of type %r, found %r instead" % (
msg, cls, type(obj)))
def assert_categorical_equal(res, exp):
if not array_equivalent(res.categories, exp.categories):
raise AssertionError(
'categories not equivalent: {0} vs {1}.'.format(res.categories,
exp.categories))
if not array_equivalent(res.codes, exp.codes):
raise AssertionError(
'codes not equivalent: {0} vs {1}.'.format(res.codes, exp.codes))
if res.ordered != exp.ordered:
raise AssertionError("ordered not the same")
def raise_assert_detail(obj, message, left, right):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
msg = """{0} are different
{1}
[left]: {2}
[right]: {3}""".format(obj, message, left, right)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right,
strict_nan=False, err_msg=None,
obj='numpy array'):
"""Checks that 'np_array' is equivalent to 'assert_equal'.
This is similar to ``numpy.testing.assert_array_equal``, but can
check equality including ``np.nan``. Two numpy arrays are regarded as
equivalent if the arrays have equal non-NaN elements,
and `np.nan` in corresponding locations.
"""
# compare shape and values
if array_equivalent(left, right, strict_nan=strict_nan):
return
if err_msg is None:
# show detailed error
if np.isscalar(left) and np.isscalar(right):
# show scalar comparison error
assert_equal(left, right)
elif is_list_like(left) and is_list_like(right):
# some test cases pass list
left = np.asarray(left)
right = np.array(right)
if left.shape != right.shape:
raise_assert_detail(obj, '{0} shapes are different'.format(obj),
left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{0} values are different ({1} %)'.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
elif is_list_like(left):
msg = "First object is iterable, second isn't"
raise_assert_detail(obj, msg, left, right)
else:
msg = "Second object is iterable, first isn't"
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
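# Illustrative sketch of assert_numpy_array_equal. Unlike
# numpy.testing.assert_array_equal, NaNs in matching positions compare equal.
# The arrays are small ad-hoc values chosen for the example.
def _example_assert_numpy_array_equal():
    a = np.array([1.0, np.nan, 3.0])
    assert_numpy_array_equal(a, np.array([1.0, np.nan, 3.0]))   # passes: NaNs line up
    try:
        assert_numpy_array_equal(a, np.array([1.0, 2.0, 3.0]))
    except AssertionError:
        return True                    # a mismatch is reported as an AssertionError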
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type=True,
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
    check_index_type : bool, default True
        Whether to check the Index class, dtype and inferred_type are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
check_less_precise : bool, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
check_exact : bool, default False
Whether to compare number exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
    check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
assertIsInstance(left, Series, '[Series] ')
assertIsInstance(right, Series, '[Series] ')
if check_series_type:
assertIsInstance(left, type(right))
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, 'Series length are different',
'{0}, {1}'.format(len(left), left.index),
'{0}, {1}'.format(len(right), right.index))
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise, check_exact=check_exact,
obj='{0}.index'.format(obj))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
obj='{0}'.format(obj))
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check the values in that case
if is_datetimelike_v_numeric(left, right) or is_datetimelike_v_object(left, right) or needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
raise AssertionError(
'[datetimelike_compat=True] {0} is not equal to {1}.'.format(left.values,
right.values))
else:
assert_numpy_array_equal(left.values, right.values)
else:
assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise, obj='{0}'.format(obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
    check_index_type : bool, default True
        Whether to check the Index class, dtype and inferred_type are identical.
    check_column_type : bool, default True
        Whether to check the columns class, dtype and inferred_type are identical.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
check_less_precise : bool, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
assertIsInstance(left, DataFrame, '[DataFrame] ')
assertIsInstance(right, DataFrame, '[DataFrame] ')
if check_frame_type:
assertIsInstance(left, type(right))
# shape comparison (row)
if left.shape[0] != right.shape[0]:
raise_assert_detail(obj, 'DataFrame shape (number of rows) are different',
'{0}, {1}'.format(left.shape[0], left.index),
'{0}, {1}'.format(right.shape[0], right.index))
# shape comparison (columns)
if left.shape[1] != right.shape[1]:
raise_assert_detail(obj, 'DataFrame shape (number of columns) are different',
'{0}, {1}'.format(left.shape[1], left.columns),
'{0}, {1}'.format(right.shape[1], right.columns))
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise, check_exact=check_exact,
obj='{0}.index'.format(obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise, check_exact=check_exact,
obj='{0}.columns'.format(obj))
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(lcol, rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
obj='DataFrame.iloc[:, {0}]'.format(i))
def assert_panelnd_equal(left, right,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False):
if check_panel_type:
assertIsInstance(left, type(right))
for axis in ['items', 'major_axis', 'minor_axis']:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
for i, item in enumerate(left._get_axis(0)):
assert item in right, "non-matching item (right) '%s'" % item
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
assert item in left, "non-matching item (left) '%s'" % item
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '%r'" % k
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements comparable with assert_almost_equal
Checks that the elements are equal, but not the same object. (Does not
check that items in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
assert elem1 is not elem2, "Expected object %r and object %r to be different objects, were same." % (
type(elem1), type(elem2))
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
    return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x,k), name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False,True], name=name)
return Index([False,True] + [False]*(k-2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
return Panel4D(dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper)))
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default names,
if false will use no names, if a list is given, the name of each level
in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
    assert (names is None or names is False
            or names is True or len(names) == nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex,
u=makeUnicodeIndex, dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
'"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub("[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = prefix + '_l%d_g' % i + str(j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
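# Illustrative sketch of makeCustomIndex. The argument values are arbitrary: a
# flat integer index, then a 2-level MultiIndex with default names whose first
# level repeats each label twice.
def _example_make_custom_index():
    flat = makeCustomIndex(nentries=5, nlevels=1, idx_type='i')
    multi = makeCustomIndex(nentries=6, nlevels=2, names=True, ndupe_l=[2])
    return flat, multi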
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value at that position,
the default generator used yields values of the form "RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding index.
The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of
length N < idx_nlevels, for just the first N levels. If ndupe
doesn't divide nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated twice on first level,
# default names on both axis, single index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R%dC%d" % (r, c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
        Float in (0, 1) that gives the proportion of non-missing numbers in
        the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checks. Copied this from Nipy/Nipype (Copyright of
# respective developers, license: BSD-3)
def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion,
exc_failed_import=ImportError,
exc_failed_check=RuntimeError):
"""Check that the minimal version of the required package is installed.
Parameters
----------
pkg_name : string
Name of the required package.
version : string, optional
Minimal version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
exc_failed_import : Exception, optional
Class of the exception to be thrown if import failed.
exc_failed_check : Exception, optional
Class of the exception to be thrown if version check failed.
Examples
--------
package_check('numpy', '1.3')
package_check('networkx', '1.0', 'tutorial1')
"""
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
if version:
msg += ' with version >= %s' % (version,)
try:
mod = __import__(pkg_name)
except ImportError:
raise exc_failed_import(msg)
if not version:
return
try:
have_version = mod.__version__
except AttributeError:
raise exc_failed_check('Cannot find version for %s' % pkg_name)
if checker(have_version) < checker(version):
raise exc_failed_check(msg)
def skip_if_no_package(*args, **kwargs):
"""Raise SkipTest if package_check fails
Parameters
----------
*args Positional parameters passed to `package_check`
*kwargs Keyword parameters passed to `package_check`
"""
from nose import SkipTest
package_check(exc_failed_import=SkipTest,
exc_failed_check=SkipTest,
*args, **kwargs)
def skip_if_no_package_deco(pkg_name, version=None, app='pandas'):
from nose import SkipTest
def deco(func):
@wraps(func)
def wrapper(*args, **kwargs):
package_check(pkg_name, version=version, app=app,
exc_failed_import=SkipTest, exc_failed_check=SkipTest)
return func(*args, **kwargs)
return wrapper
return deco
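# Illustrative sketch of skip_if_no_package_deco. The package name 'tables' and
# the version string are arbitrary choices for the example; the check only
# happens when the wrapped function is actually called.
def _example_skip_if_no_package_deco():
    @skip_if_no_package_deco('tables', version='3.0.0')
    def _needs_pytables():
        import tables
        return tables.__version__
    return _needs_pytables()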
#
# Additional tags decorators for nose
#
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
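# Illustrative sketch of optional_args: the same decorator can be applied bare
# or with keyword arguments. The 'tag' parameter is invented for the example
# and is not used elsewhere in this module.
@optional_args
def _tagged(f, tag='default'):
    @wraps(f)
    def inner(*args, **kwargs):
        return f(*args, **kwargs)
    inner.tag = tag
    return inner

@_tagged                       # bare usage: tag stays 'default'
def _example_plain():
    return 1

@_tagged(tag='slow')           # keyword usage: tag becomes 'slow'
def _example_tagged():
    return 2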
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,)
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check for connectivity.
Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
SkipTest
Errors not related to networking will always be raised.
"""
from nose import SkipTest
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
raise SkipTest
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
            if errno is None and hasattr(e, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
raise SkipTest("Skipping test due to known errno"
" and error %s" % e)
try:
e_str = traceback.format_exc(e)
except:
e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
raise SkipTest("Skipping test because exception message is known"
" and error %s" % e)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
raise SkipTest("Skipping test due to lack of connectivity"
" and error %s" % e)
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assertRaises(_exception, _callable=None, *args, **kwargs):
"""assertRaises that is usable as context manager or in a with statement
Exceptions that don't match the given Exception type fall through::
>>> with assertRaises(ValueError):
... raise TypeError("banana")
...
Traceback (most recent call last):
...
TypeError: banana
If it raises the given Exception type, the test passes
>>> with assertRaises(KeyError):
... dct = dict()
... dct["apple"]
If the expected error doesn't occur, it raises an error.
>>> with assertRaises(KeyError):
... dct = {'apple':True}
... dct["apple"]
Traceback (most recent call last):
...
AssertionError: KeyError not raised.
In addition to using it as a contextmanager, you can also use it as a
function, just like the normal assertRaises
>>> assertRaises(TypeError, ",".join, [1, 3, 5])
"""
manager = _AssertRaisesContextmanager(exception=_exception)
# don't return anything if used in function form
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
def assertRaisesRegexp(_exception, _regexp, _callable=None, *args, **kwargs):
""" Port of assertRaisesRegexp from unittest in Python 2.7 - used in with statement.
Explanation from standard library:
Like assertRaises() but also tests that regexp matches on the string
representation of the raised exception. regexp may be a regular expression
object or a string containing a regular expression suitable for use by
re.search().
You can pass either a regular expression or a compiled regular expression object.
>>> assertRaisesRegexp(ValueError, 'invalid literal for.*XYZ',
... int, 'XYZ')
>>> import re
>>> assertRaisesRegexp(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assertRaisesRegexp(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assertRaisesRegexp(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assertRaisesRegexp(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""handles the behind the scenes work for assertRaises and assertRaisesRegexp"""
def __init__(self, exception, regexp=None, *args, **kwargs):
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
expected = self.exception
if not exc_type:
name = getattr(expected, "__name__", str(expected))
raise AssertionError("{0} not raised.".format(name))
if issubclass(exc_type, expected):
return self.handle_success(exc_type, exc_value, traceback)
return self.handle_failure(exc_type, exc_value, traceback)
def handle_failure(*args, **kwargs):
# Failed, so allow Exception to bubble up
return False
def handle_success(self, exc_type, exc_value, traceback):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
e = AssertionError('"%s" does not match "%s"' %
(self.regexp.pattern, str(val)))
raise_with_traceback(e, traceback)
return True
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
    raise a warning. Defaults to ``Warning``, the base class of all
    warnings (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [ clear ]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning, DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. File were warning"
" is raised: {0} != {1}. Warning message: {2}".format(
actual_warning.filename, caller.filename,
actual_warning.message))
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
def disabled(t):
t.disabled = True
return t
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
    to the original state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
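# Illustrative sketch of use_numexpr: temporarily toggle numexpr evaluation
# (and its element threshold) around a block of frame arithmetic. The frames
# come from makeDataFrame() purely for the example.
def _example_use_numexpr():
    df = makeDataFrame()
    with use_numexpr(False):
        without_ne = df + df           # evaluated without numexpr
    with use_numexpr(True, min_elements=1):
        with_ne = df + df              # numexpr attempted even for tiny frames
    return without_ne, with_ne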
# Also provide all assert_* functions in the TestCase class
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isfunction(obj) and name.startswith('assert'):
setattr(TestCase, name, staticmethod(obj))
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image: https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
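# Illustrative sketch of test_parallel. Return values are discarded by the
# decorator, so results are collected through a shared list; the index length
# of 5 is an arbitrary choice for the example.
def _example_test_parallel():
    results = []

    @test_parallel(num_threads=2)
    def _append_length(n=5):
        results.append(len(makeStringIndex(n)))

    _append_length()
    return results                     # [5, 5]: both threads are joined before returning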
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
| mit |
ga7g08/ga7g08.github.io | research/GenerateIntroPlots.py | 1 | 1305 | import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['axes.labelsize'] = 18
plt.xkcd()
N = 100
F1 = 1.0
F2 = 1e-3
time = np.linspace(0, 10, N)
# Deterministic phase
phase = F1 * time + .5 * F2 * time**2
# --------- Uncorrelated Noise ---------------
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
phase_N1 = phase + np.random.normal(0, 1, N) # Add uncorrelated noise
res_N1 = phase_N1 - np.poly1d(np.polyfit(time, phase_N1, 2))(time) # Fit and remove
ax.plot(time, res_N1, "-o", markersize=3)
ax.set_xlabel("time")
ax.set_xticks([])
ax.set_ylabel("Residual")
ax.set_yticks([0])
ymax = 3*np.max(res_N1)
ax.set_ylim(-ymax, ymax)
ax.grid()
plt.savefig("img/UncorrelatedNoise.png")
plt.show()
# --------- Correlated Noise ---------------
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
phase_N1 = phase + np.cumsum(np.random.normal(0, 0.1, N))  # Add correlated noise (random walk)
res_N1 = phase_N1 - np.poly1d(np.polyfit(time, phase_N1, 2))(time) # Fit and remove
ax.plot(time, res_N1, "-o", markersize=3)
ax.set_xlabel("time")
ax.set_xticks([])
ax.set_ylabel("Residual")
ax.set_yticks([0])
ymax = 3*np.max(res_N1)
ax.set_ylim(-ymax, ymax)
ax.grid()
plt.savefig("img/CorrelatedNoise.png")
plt.show()
| mit |
ephes/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
python-sasx/sasx | sasx/magic.py | 1 | 1367 | from __future__ import division
import pandas
from IPython.core.magic import (Magics, magics_class, cell_magic)
from sasx.parse import sasx_parse
from sasx.code import sasx_preloop
from sasx.code import sasx_loop
from sasx.code import sasx_postloop
@magics_class
class SasxMagics(Magics):
"""Define Magic to run code in Simple dAta SyntaX (SASX).
%%sasx - Transform SASX code into Python code and execute it.
Special keywords recognised by SASX :
- data
- set
- drop
- keep
- output
- where ?
- _n_ ?
- groupby ?
"""
def __init__(self, shell):
super(SasxMagics, self).__init__(shell)
@cell_magic
def sasx(self, line_param, cell):
cell_parsed = sasx_parse(cell, self)
if cell_parsed['status']==0:
print(cell_parsed['message'])
return
#Generate python code
str_code = ""
str_code = str_code + sasx_preloop(cell_parsed)
str_code = str_code + sasx_loop(cell_parsed)
str_code = str_code + sasx_postloop(cell_parsed)
#Execute the code
ns = {}
print("-----")
print(str_code)
print("-----")
exec str_code in self.shell.user_ns, ns
# Register
ip = get_ipython()
ip.register_magics(SasxMagics)
| mit |
macks22/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
geoscixyz/em_examples | em_examples/DCIP_overburden_PseudoSection.py | 1 | 31511 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Maps, SolverLU, Utils
from SimPEG.Utils import ExtractCoreMesh
import numpy as np
from SimPEG.EM.Static import DC, IP
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import LogFormatter
from matplotlib import colors, ticker, cm
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.constants import epsilon_0
from scipy.interpolate import griddata
import copy
from ipywidgets import (
interact, interact_manual, IntSlider, FloatSlider,
FloatText, ToggleButtons, fixed, Widget
)
from .Base import widgetify
# Mesh and sigmaMap are used as module-level globals
npad = 12
growrate = 2.
cs = 20.
hx = [(cs, npad, -growrate), (cs, 100), (cs, npad, growrate)]
hy = [(cs, npad, -growrate), (cs, 50)]
mesh = Mesh.TensorMesh([hx, hy], "CN")
expmap = Maps.ExpMap(mesh)
mapping = expmap
xmin = -1000.
xmax = 1000.
ymin = -1000.
ymax = 100.
dx = 60.
xr = np.arange(xmin, xmax+1., dx)
dxr = np.diff(xr)
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
indCC, meshcore = ExtractCoreMesh(xylim, mesh)
indx = (mesh.gridFx[:, 0] >= xmin) & (mesh.gridFx[:, 0] <= xmax) & (mesh.gridFx[:, 1] >= ymin) & (mesh.gridFx[:, 1] <= ymax)
indy = (mesh.gridFy[:, 0] >= xmin) & (mesh.gridFy[:, 0] <= xmax) & (mesh.gridFy[:, 1] >= ymin) & (mesh.gridFy[:, 1] <= ymax)
indF = np.concatenate((indx, indy))
nmax = 8
def model_valley(lnsig_air=np.log(1e-8), ln_sigback=np.log(1e-4),
ln_over=np.log(1e-2), ln_sigtarget=np.log(1e-3),
overburden_thick=200., overburden_wide=1000.,
target_thick=200., target_wide=400.,
a=1000., b=500., xc=0., zc=250.):
mtrue = ln_sigback*np.ones(mesh.nC)
mhalf = copy.deepcopy(mtrue)
ellips = (((mesh.gridCC[:, 0]-xc)**2.)/a**2. + ((mesh.gridCC[:, 1]-zc)**2.)/b**2.) <1.
mtrue[ellips] = lnsig_air
mair = copy.deepcopy(mtrue)
# overb = (mesh.gridCC[:, 1] >-overburden_thick) & (mesh.gridCC[:, 1]<=0)&(mesh.gridCC[:, 0] >-overburden_wide/2.)&(mesh.gridCC[:, 0] <overburden_wide/2.)
# mtrue[overb] = ln_over*np.ones_like(mtrue[overb])
if np.any(ellips):
bottom_valley = mesh.gridCC[ellips, 1].min()
overb = (mesh.gridCC[:, 1] >= bottom_valley) & (mesh.gridCC[:, 1] < bottom_valley+overburden_thick) & ellips
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])
mair[overb] = ln_sigback
else:
bottom_valley = 0.
mover = copy.deepcopy(mtrue)
target = (mesh.gridCC[:, 1] > bottom_valley-target_thick) & (mesh.gridCC[:, 1] < bottom_valley) & (mesh.gridCC[:, 0] > -target_wide/2.) & (mesh.gridCC[:, 0] < target_wide/2.)
mtrue[target] = ln_sigtarget*np.ones_like(mtrue[target])
mtrue = Utils.mkvc(mtrue)
return mtrue, mhalf, mair, mover
def findnearest(A):
idx = np.abs(mesh.gridCC[:, 0, None]-A).argmin(axis=0)
return mesh.gridCC[idx, 0]
def get_Surface(mtrue, A):
active = (mtrue > (np.log(1e-8)))
nearpoint = findnearest(A)
columns = mesh.gridCC[:, 0, None] == nearpoint
ind = np.logical_and(columns.T, active).T
idm = []
surface = []
for i in range(ind.shape[1]):
idm.append(np.where(np.all(mesh.gridCC == np.r_[nearpoint[i], np.max(mesh.gridCC[ind[:, i], 1])],
axis=1)))
surface.append(mesh.gridCC[idm[-1], 1])
return Utils.mkvc(np.r_[idm]), Utils.mkvc(np.r_[surface])
def model_fields(A, B, mtrue, mhalf, mair, mover, whichprimary='overburden'):
idA, surfaceA = get_Surface(mtrue, A)
idB, surfaceB = get_Surface(mtrue, B)
Mx = mesh.gridCC
# Nx = np.empty(shape =(mesh.nC, 2))
rx = DC.Rx.Pole(Mx)
# rx = DC.Rx.Dipole(Mx, Nx)
if(B == []):
src = DC.Src.Pole([rx], np.r_[A, surfaceA])
else:
src = DC.Src.Dipole([rx], np.r_[A, surfaceA], np.r_[B, surfaceB])
# src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
survey = DC.Survey([src])
# survey = DC.Survey([src])
# survey_prim = DC.Survey([src])
survey_prim = DC.Survey([src])
survey_air = DC.Survey([src])
# problem = DC.Problem3D_CC(mesh, sigmaMap = mapping)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
# problem_prim = DC.Problem3D_CC(mesh, sigmaMap = mapping)
problem_prim = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem_air = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.Solver = SolverLU
problem_prim.Solver = SolverLU
problem_air.Solver = SolverLU
problem.pair(survey)
problem_prim.pair(survey_prim)
problem_air.pair(survey_air)
mesh.setCellGradBC("neumann")
cellGrad = mesh.cellGrad
faceDiv = mesh.faceDiv
if whichprimary == 'air':
phi_primary = survey_prim.dpred(mair)
elif whichprimary == 'half':
phi_primary = survey_prim.dpred(mhalf)
elif whichprimary == 'overburden':
phi_primary = survey_prim.dpred(mover)
e_primary = -cellGrad*phi_primary
j_primary = problem_prim.MfRhoI*problem_prim.Grad*phi_primary
q_primary = epsilon_0*problem_prim.Vol*(faceDiv*e_primary)
primary_field = {'phi': phi_primary, 'e': e_primary, 'j': j_primary, 'q': q_primary}
phi_total = survey.dpred(mtrue)
e_total = -cellGrad*phi_total
j_total = problem.MfRhoI*problem.Grad*phi_total
q_total = epsilon_0*problem.Vol*(faceDiv*e_total)
total_field = {'phi': phi_total, 'e': e_total, 'j': j_total, 'q': q_total}
phi_air = survey.dpred(mair)
e_air = -cellGrad*phi_air
j_air = problem.MfRhoI*problem.Grad*phi_air
q_air = epsilon_0*problem.Vol*(faceDiv*e_air)
air_field = {'phi': phi_air, 'e': e_air, 'j': j_air, 'q': q_air}
return src, primary_field, air_field, total_field
def get_Surface_Potentials(mtrue, survey, src, field_obj):
phi = field_obj['phi']
CCLoc = mesh.gridCC
XLoc = np.unique(mesh.gridCC[:, 0])
surfaceInd, zsurfaceLoc = get_Surface(mtrue, XLoc)
phiSurface = phi[surfaceInd]
phiScale = 0.
if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
refInd = Utils.closestPoints(mesh, [xmax+60., 0.], gridLoc='CC')
# refPoint = CCLoc[refInd]
# refSurfaceInd = np.where(xSurface == refPoint[0])
# phiScale = np.median(phiSurface)
phiScale = phi[refInd]
phiSurface = phiSurface - phiScale
return XLoc, phiSurface, phiScale
def getCylinderPoints(xc, zc, a, b):
xLocOrig1 = np.arange(-a, a+a/10., a/10.)
xLocOrig2 = np.arange(a, -a-a/10., -a/10.)
# Top half of cylinder
zLoc1 = b*np.sqrt(1.-(xLocOrig1/a)**2)+zc
# Bottom half of cylinder
zLoc2 = -b*np.sqrt(1.-(xLocOrig2/a)**2)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
cylinderPoints = np.vstack([np.vstack([xLoc1, zLoc1]).T, np.vstack([xLoc2, zLoc2]).T])
return cylinderPoints
def get_OverburdenPoints(cylinderPoints, overburden_thick):
bottom = cylinderPoints[:, 1].min()
indb = np.where(cylinderPoints[:, 1] < 0.)
overburdenPoints = [np.maximum(cylinderPoints[i, 1], bottom+overburden_thick) for i in indb]
return np.vstack([cylinderPoints[indb, 0], overburdenPoints]).T
def getPlateCorners(target_thick, target_wide, cylinderPoints):
bottom = cylinderPoints[:, 1].min()
xc = 0.
zc = bottom-0.5*target_thick
rotPlateCorners = np.array([[-0.5*target_wide, 0.5*target_thick], [0.5*target_wide, 0.5*target_thick],
[-0.5*target_wide, -0.5*target_thick], [0.5*target_wide, -0.5*target_thick]])
plateCorners = rotPlateCorners + np.hstack([np.repeat(xc, 4).reshape([4, 1]), np.repeat(zc, 4).reshape([4, 1])])
return plateCorners
def get_TargetPoints(target_thick, target_wide, ellips_b, ellips_zc):
xLocOrig1 = np.arange(-target_wide/2., target_wide/2.+target_wide/10., target_wide/10.)
xLocOrig2 = np.arange(target_wide/2., -target_wide/2.-target_wide/10., -target_wide/10.)
    zloc1 = np.ones_like(xLocOrig1)*(ellips_b+ellips_zc)  # top edge of the target
    zloc2 = np.ones_like(xLocOrig2)*(ellips_b+ellips_zc-target_thick)  # bottom edge of the target
    # Assemble the target outline from the top and bottom edges
    targetpoint = np.vstack([np.vstack([xLocOrig1, zloc1]).T, np.vstack([xLocOrig2, zloc2]).T])
    return targetpoint
def getSensitivity(survey, A, B, M, N, model):
if(survey == "Dipole-Dipole"):
rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
elif(survey == "Pole-Dipole"):
rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
src = DC.Src.Pole([rx], np.r_[A, 0.])
elif(survey == "Dipole-Pole"):
rx = DC.Rx.Pole(np.r_[M, 0.])
src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
elif(survey == "Pole-Pole"):
rx = DC.Rx.Pole(np.r_[M, 0.])
src = DC.Src.Pole([rx], np.r_[A, 0.])
survey = DC.Survey([src])
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.Solver = SolverLU
problem.pair(survey)
fieldObj = problem.fields(model)
J = problem.Jtvec(model, np.array([1.]), f=fieldObj)
return J
def calculateRhoA(survey, VM, VN, A, B, M, N):
# to stabilize division
eps = 1e-9
if(survey == "Dipole-Dipole"):
G = 1. / (1./(np.abs(A-M)+eps) - 1./(np.abs(M-B)+eps) - 1./(np.abs(N-A)+eps) + 1./(np.abs(N-B)+eps))
rho_a = (VM-VN)*2.*np.pi*G
elif(survey == "Pole-Dipole"):
G = 1. / (1./(np.abs(A-M)+eps) - 1./(np.abs(N-A)+eps))
rho_a = (VM-VN)*2.*np.pi*G
elif(survey == "Dipole-Pole"):
G = 1. / (1./(np.abs(A-M)+eps) - 1./(np.abs(M-B)+eps))
rho_a = (VM)*2.*np.pi*G
elif(survey == "Pole-Pole"):
G = 1. / (1./(np.abs(A-M)+eps))
rho_a = (VM)*2.*np.pi*G
return rho_a
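# Hypothetical usage of calculateRhoA (numbers are illustrative only): for a
# Dipole-Dipole reading with current electrodes at A=-30., B=-10., potential
# electrodes at M=10., N=30. and measured potentials VM=0.02, VN=0.01,
#   rho_a = calculateRhoA("Dipole-Dipole", 0.02, 0.01, -30., -10., 10., 30.)
# returns the corresponding apparent resistivity in Ohm-m.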
def getPseudoLocs(xr, ntx, nmax, flag="PoleDipole"):
xloc = []
yloc = []
for i in range(ntx):
if i < ntx-nmax+1:
if flag == 'DipoleDipole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
elif flag == 'PoleDipole':
txmid = xr[i]
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
elif flag == 'DipolePole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]
mid = (txmid+rxmid)*0.5
xloc.append(mid)
yloc.append(np.arange(nmax)+1.)
else:
if flag == 'DipoleDipole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
elif flag == 'PoleDipole':
txmid = xr[i]
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
elif flag == 'DipolePole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]
mid = (txmid+rxmid)*0.5
xloc.append(mid)
yloc.append(np.arange(mid.size)+1.)
xlocvec = np.hstack(xloc)
ylocvec = np.hstack(yloc)
return np.c_[xlocvec, ylocvec]
def DC2Dsurvey(mtrue, flag="PoleDipole", nmax=8):
if flag == "PoleDipole":
ntx = xr.size-2
elif flag == "DipolePole":
ntx = xr.size-2
elif flag == "DipoleDipole":
ntx = xr.size-3
else:
raise Exception('Not Implemented')
xzlocs = getPseudoLocs(xr, ntx, nmax, flag)
txList = []
zloc = -cs/2.
for i in range(ntx):
if flag == "PoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[mesh.vectorCCx.min(), zloc]
if i < ntx-nmax+1:
Mx = xr[i+1:i+1+nmax]
_, Mz = get_Surface(mtrue, Mx)
Nx = xr[i+2:i+2+nmax]
_, Nz = get_Surface(mtrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
else:
Mx = xr[i+1:ntx+1]
_, Mz = get_Surface(mtrue, Mx)
Nx = xr[i+2:i+2+nmax]
_, Nz = get_Surface(mtrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
elif flag == "DipolePole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax+1:
Mx = xr[i+2:i+2+nmax]
_, Mz = get_Surface(mtrue, Mx)
Nx = np.ones(nmax)*mesh.vectorCCx.max()
_, Nz = get_Surface(mtrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
else:
Mx = xr[i+2:ntx+2]
_, Mz = get_Surface(mtrue, Mx)
Nx = np.ones(ntx-i)*mesh.vectorCCx.max()
_, Nz = get_Surface(mtrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
elif flag == "DipoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax:
Mx = xr[i+2:i+2+nmax]
_, Mz = get_Surface(mtrue, Mx)
Nx = xr[i+3:i+3+nmax]
_, Nz = get_Surface(mtrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
else:
Mx = xr[i+2:len(xr)-1]
_, Mz = get_Surface(mtrue, Mx)
Nx = xr[i+3:len(xr)]
_, Nz = get_Surface(mtrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
rx = DC.Rx.Dipole(M, N)
src = DC.Src.Dipole([rx], A, B)
txList.append(src)
survey = DC.Survey(txList)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
return survey, xzlocs
def IP2Dsurvey(miptrue, sigmadc, flag="PoleDipole", nmax=8):
if flag == "PoleDipole":
ntx = xr.size-2
elif flag == "DipolePole":
ntx = xr.size-2
elif flag == "DipoleDipole":
ntx = xr.size-3
else:
raise Exception('Not Implemented')
xzlocs = getPseudoLocs(xr, ntx, nmax, flag)
txList = []
zloc = -cs/2.
for i in range(ntx):
if flag == "PoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[mesh.vectorCCx.min(), zloc]
if i < ntx-nmax+1:
Mx = xr[i+1:i+1+nmax]
_, Mz = get_Surface(miptrue, Mx)
Nx = xr[i+2:i+2+nmax]
_, Nz = get_Surface(miptrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
else:
Mx = xr[i+1:ntx+1]
_, Mz = get_Surface(miptrue, Mx)
Nx = xr[i+2:i+2+nmax]
_, Nz = get_Surface(miptrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
elif flag == "DipolePole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax+1:
Mx = xr[i+2:i+2+nmax]
_, Mz = get_Surface(miptrue, Mx)
Nx = np.ones(nmax)*mesh.vectorCCx.max()
_, Nz = get_Surface(miptrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
else:
Mx = xr[i+2:ntx+2]
_, Mz = get_Surface(miptrue, Mx)
Nx = np.ones(ntx-i)*mesh.vectorCCx.max()
_, Nz = get_Surface(miptrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
elif flag == "DipoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax:
Mx = xr[i+2:i+2+nmax]
_, Mz = get_Surface(miptrue, Mx)
Nx = xr[i+3:i+3+nmax]
_, Nz = get_Surface(miptrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
else:
Mx = xr[i+2:len(xr)-1]
_, Mz = get_Surface(miptrue, Mx)
Nx = xr[i+3:len(xr)]
_, Nz = get_Surface(miptrue, Nx)
M = np.c_[Mx, Mz]
N = np.c_[Nx, Nz]
rx = DC.Rx.Dipole(M, N)
src = DC.Src.Dipole([rx], A, B)
txList.append(src)
survey = IP.Survey(txList)
problem = IP.Problem3D_CC(mesh, sigma=sigmadc, etaMap=Maps.IdentityMap(mesh))
problem.pair(survey)
return survey, xzlocs
def PseudoSectionPlotfnc(i, j, survey, flag="PoleDipole"):
matplotlib.rcParams['font.size'] = 14
ntx = xr.size-2
TxObj = survey.srcList
TxLoc = TxObj[i].loc
RxLoc = TxObj[i].rxList[0].locs
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(111, autoscale_on=False, xlim=(xr.min()-5, xr.max()+5), ylim=(nmax+1, -2))
plt.plot(xr, np.zeros_like(xr), 'ko', markersize=4)
if flag == "PoleDipole":
plt.plot(TxLoc[0][0], np.zeros(1), 'rv', markersize=10)
# print([TxLoc[0][0],0])
ax.annotate('A', xy=(TxLoc[0][0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
else:
plt.plot([TxLoc[0][0],TxLoc[1][0]], np.zeros(2), 'rv', markersize=10)
# print([[TxLoc[0][0],0],[TxLoc[1][0],0]])
ax.annotate('A', xy=(TxLoc[0][0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('B', xy=(TxLoc[1][0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
# for i in range(ntx):
if i < ntx-nmax+1:
if flag == "PoleDipole":
txmid = TxLoc[0][0]
else:
txmid = (TxLoc[0][0] + TxLoc[1][0])*0.5
MLoc = RxLoc[0][j]
NLoc = RxLoc[1][j]
if flag == "DipolePole":
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
rxmid = MLoc[0]
else:
rxmid = (MLoc[0]+NLoc[0])*0.5
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
plt.plot(NLoc[0], np.zeros(1), 'b^', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
mid = (txmid+rxmid)*0.5
midSep = np.sqrt(np.square(txmid-rxmid))
plt.plot(txmid, np.zeros(1), 'ro')
plt.plot(rxmid, np.zeros(1), 'bo')
plt.plot(mid, midSep/2., 'go')
plt.plot(np.r_[txmid, mid], np.r_[0, midSep/2.], 'k:')
plt.plot(np.r_[rxmid, mid], np.r_[0, midSep/2.], 'k:')
else:
if flag == "PoleDipole":
txmid = TxLoc[0][0]
else:
txmid = (TxLoc[0][0] + TxLoc[1][0])*0.5
MLoc = RxLoc[0][j]
NLoc = RxLoc[1][j]
if flag == "DipolePole":
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
rxmid = MLoc[0]
else:
rxmid = (MLoc[0]+NLoc[0])*0.5
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
plt.plot(NLoc[0], np.zeros(1), 'b^', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
mid = (txmid+rxmid)*0.5
plt.plot((txmid+rxmid)*0.5, np.arange(mid.size)+1., 'bo')
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], 'k:')
for j in range(ntx-i):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.xlabel("X (m)")
plt.ylabel("N-spacing")
plt.xlim(xr.min()-5, xr.max()+5)
plt.ylim(nmax*dx/2+dx, -2*dx)
plt.show()
return
def DipoleDipolefun(i):
matplotlib.rcParams['font.size'] = 14
plt.figure(figsize=(10, 3))
ntx = xr.size-2
plt.plot(xr[:-1]+dxr*0.5, np.zeros_like(xr[:-1]), 'ko')
plt.plot(xr[i]+dxr[i]*0.5, np.zeros(1), 'ro')
# for i in range(ntx):
if i < ntx-nmax+1:
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
mid = (txmid+rxmid)*0.5
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(mid, np.arange(nmax)+1., 'bo')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, nmax], 'k:')
for j in range(nmax):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
else:
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
mid = (txmid+rxmid)*0.5
plt.plot((txmid+rxmid)*0.5, np.arange(mid.size)+1., 'bo')
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], 'k:')
for j in range(ntx-i):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.xlabel("X (m)")
plt.ylabel("N-spacing")
plt.xlim(xr.min(), xr.max())
plt.ylim(nmax+1, -1)
plt.show()
return
def PseudoSectionWidget(survey, flag):
if flag == "PoleDipole":
ntx, nmax = xr.size-2, 8
dxr = np.diff(xr)
elif flag == "DipolePole":
ntx, nmax = xr.size-1, 7
dxr = xr
elif flag == "DipoleDipole":
ntx, nmax = xr.size-3, 8
dxr = np.diff(xr)
xzlocs = getPseudoLocs(dxr, ntx, nmax, flag)
PseudoSectionPlot = lambda i,j,flag: PseudoSectionPlotfnc(i, j, survey, flag)
return widgetify(PseudoSectionPlot,
i=IntSlider(min=0, max=ntx-1, step=1, value=0),
j=IntSlider(min=0, max=nmax-1, step=1, value=0),
flag=ToggleButtons(options=['DipoleDipole', 'PoleDipole', 'DipolePole'],
description='Array Type'),)
def MidpointPseudoSectionWidget():
ntx = xr.size-2
return widgetify(DipoleDipolefun, i=IntSlider(min=0, max=ntx-1, step=1, value=0))
def DCIP2Dfwdfun(mesh, mapping,
rhohalf, rholayer, rhoTarget,
chghalf, chglayer, chgTarget,
overburden_thick, overburden_wide,
target_thick, target_wide, ellips_a, ellips_b, xc, zc,
predmis, surveyType, nmax=8, which='DC', Scale='Linear'):
matplotlib.rcParams['font.size'] = 14
ln_sigTarget = np.log(1./rhoTarget)
ln_sigLayer = np.log(1./rholayer)
ln_sigHalf = np.log(1./rhohalf)
mtrue, mhalf, mair, mover = model_valley(lnsig_air=np.log(1e-8),
ln_sigback=ln_sigHalf,
ln_over=ln_sigLayer,
ln_sigtarget=ln_sigTarget,
overburden_thick =overburden_thick,
target_thick=target_thick,
target_wide =target_wide,
a=ellips_a, b=ellips_b,
xc=xc, zc=zc)
mdctrue = mtrue
if which == 'IP':
mtrue, mhalf, mair, mover = model_valley(lnsig_air=0.,
ln_sigback=chghalf,
ln_over=chglayer,
ln_sigtarget=chgTarget,
overburden_thick=overburden_thick,
target_thick=target_thick,
target_wide=target_wide,
a=ellips_a, b=ellips_b, xc=xc,zc=zc)
sigmadc = 1./(mapping*mdctrue)
survey, xzlocs = IP2Dsurvey(mtrue, sigmadc, surveyType, nmax=nmax)
else:
survey, xzlocs = DC2Dsurvey(mtrue, surveyType, nmax=nmax)
dmover = survey.dpred(mover)
dpred = survey.dpred(mtrue)
xi, yi = np.meshgrid(np.linspace(xr.min(), xr.max(), 120), np.linspace(1., nmax, 100))
# Cheat to compute a geometric factor
# define as G = dV_halfspace / rho_halfspace
if which == 'IP':
mtest = 10.*np.ones_like(mtrue)
mtest[mdctrue == np.log(1e-8)] = 0.
dhalf = survey.dpred(mtest)
appresover = 10.*(dmover/dhalf)
apprestrue = 10.*(dpred/dhalf)
else:
dmair = survey.dpred(mair)
appresover = dmover/dmair/np.exp(ln_sigHalf)
apprestrue = dpred/dmair/np.exp(ln_sigHalf)
dtrue = griddata(xzlocs, apprestrue, (xi, yi), method='linear')
dtrue = np.ma.masked_where(np.isnan(dtrue), dtrue)
dover = griddata(xzlocs, appresover, (xi, yi), method='linear')
dover = np.ma.masked_where(np.isnan(dover), dover)
if which == 'IP':
label = 'Chargeability'
else:
label = 'Resistivity (Ohm-m)'
fig = plt.figure(figsize=(12, 9))
ax1 = plt.subplot(311)
if which == 'IP':
u = np.ma.masked_where(mdctrue <= np.log(1e-8), mtrue)
else:
u = np.ma.masked_where(mtrue <= np.log(1e-8), np.log10(1./(mapping*mtrue)))
dat1 = mesh.plotImage(u, ax=ax1, clim=(u.min(), u.max()),
grid=True, gridOpts={'color': 'k', 'alpha': 0.5})
if which == 'IP':
cb1 = plt.colorbar(dat1[0], ax=ax1)
else:
cb1ticks = np.linspace(u.min(), u.max(), 3)
cb1 = plt.colorbar(dat1[0], ax=ax1, ticks=cb1ticks)
cb1.ax.set_yticklabels(['{:.0f}'.format(10**x) for x in cb1ticks])
cb1.set_label(label)
ax1.set_ylim(ymin, ymax)
ax1.set_xlim(xmin, xmax)
ax1.set_xlabel("")
ax1.set_ylabel("Depth (m)")
ax2 = plt.subplot(312)
if Scale == 'Log':
lev_exp = np.arange(np.floor(np.log10(np.abs(dtrue.min()))),
np.ceil(np.log10(dtrue.max()))+0.1,0.1)
lev = np.power(10, lev_exp)
dat2 = ax2.contourf(xi, yi, dtrue, lev, locator=ticker.LogLocator())
ax2.contour(xi, yi, dtrue, lev, locator=ticker.LogLocator(), colors='k', alpha=0.5)
ax2.plot(xzlocs[:, 0], xzlocs[:, 1],'k.', ms=3)
cb2 = plt.colorbar(dat2, ax=ax2,ticks=np.linspace(appresover.min(),
appresover.max(), 5), format="%4.0f")
else:
dat2 = ax2.contourf(xi, yi, dtrue, 10)
ax2.contour(xi, yi, dtrue, 10, colors='k', alpha=0.5)
ax2.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb2 = plt.colorbar(dat2, ax=ax2)
cb2.set_label('Apparent\n'+label)
ax2.set_ylim(nmax+1, 0.)
ax2.set_ylabel("N-spacing")
ax2.text(250, nmax-1, "Observed")
ax3 = plt.subplot(313)
if predmis == "Data Without Target":
if Scale == 'Log':
dat3 = ax3.contourf(xi, yi, dover, lev, locator=ticker.LogLocator())
ax3.contour(xi, yi, dover, lev, locator=ticker.LogLocator(), colors='k', alpha=0.5)
ax3.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb3 = plt.colorbar(dat3, ax=ax3,
ticks=np.linspace(appresover.min(), appresover.max(), 5),
format="%4.0f")
else:
dat3 = ax3.contourf(xi, yi, dover, 10, vmin=dtrue.min(), vmax=dtrue.max())
ax3.contour(xi, yi, dover, 10, vmin=dtrue.min(),vmax=dtrue.max(), colors='k', alpha=0.5)
ax3.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb3 = plt.colorbar(dat3, ax=ax3, format="%4.0f")
cb3.set_clim(cb2.get_clim())
cb3.set_label('Apparent\n'+label)
ax3.text(250, nmax-1, "Predicted\nwithout Target")
else:
if predmis == "Difference":
mis = (apprestrue-appresover)
Mis = griddata(xzlocs, mis, (xi, yi), method='linear')
if which == 'IP':
diflabel = 'Difference (chg unit)'
else:
diflabel = 'Difference (Ohm-m)'
else:
mis = (apprestrue-appresover)/apprestrue
Mis = griddata(xzlocs, mis, (xi, yi), method='linear')
diflabel = 'Normalized Difference (%)'
dat3 = ax3.contourf(xi, yi, Mis, 10)
ax3.contour(xi, yi, Mis, 10, colors='k', alpha=0.5)
ax3.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb3 = plt.colorbar(dat3, ax=ax3, format="%4.2f")
cb3.set_label(diflabel)
ax3.text(-38, 7, diflabel)
ax3.set_ylim(nmax+1, 0.)
ax3.set_ylabel("N-spacing")
ax3.set_xlabel("Distance (m)")
plt.show()
return
def DC2DfwdWrapper(rhohalf, rholayer, rhoTarget,
chghalf, chglayer, chgTarget,
overburden_thick, overburden_wide,
target_thick, target_wide, ellips_a, ellips_b, xc, zc,
predmis, surveyType, nmax, which, Scale):
DCIP2Dfwdfun(mesh, mapping, rhohalf, rholayer, rhoTarget,
chghalf, chglayer, chgTarget,
overburden_thick, overburden_wide,
target_thick, target_wide, ellips_a, ellips_b, xc, zc,
predmis, surveyType, nmax, which, Scale)
return None
def DCIP2DfwdWidget():
return widgetify(
DC2DfwdWrapper,
xc=FloatSlider(min=-1005., max=1000., step=10., value=0.,
continuous_update=False),
zc=FloatSlider(min=-1000., max=1000., step=10., value=250.,
continuous_update=False),
ellips_a=FloatSlider(min=10., max=10000., step=100., value=1000.,
continuous_update=False),
ellips_b=FloatSlider(min=10., max=10000., step=100., value=500.,
continuous_update=False),
rhohalf=FloatText(min=1e-8, max=1e8, value=1000.,
description='$\\rho_1$',
continuous_update=False),
chghalf=FloatText(min=0., max=100, value=0.,
description='$\\eta_1$',
continuous_update=False),
rholayer=FloatText(min=1e-8, max=1e8, value=100.,
description='$\\rho_2$',
continuous_update=False),
chglayer=FloatText(min=0., max=100, value=20.,
description='$\\eta_2$',
continuous_update=False),
rhoTarget=FloatText(min=1e-8, max=1e8, value=500.,
description='$\\rho_3$',
continuous_update=False),
chgTarget=FloatText(min=0., max=100, value=10.,
description='$\\eta_3$',
continuous_update=False),
overburden_thick=FloatSlider(min=0., max=1000., step=10., value=250.,
continuous_update=False),
overburden_wide=fixed(2000.),
target_thick=FloatSlider(min=0., max=1000., step=10., value=200.,
continuous_update=False),
target_wide=FloatSlider(min=0., max=1000., step=10., value=200.,
continuous_update=False),
predmis=ToggleButtons(options=["Data Without Target", 'Difference', 'Normalized Difference']),
surveyType=ToggleButtons(options=['DipoleDipole', 'PoleDipole', 'DipolePole'],
                                 description='Array Type'),
which=ToggleButtons(options=['DC', 'IP'], description='Survey'),
nmax=IntSlider(min=1, max=16, value=8, description='Rx per Tx'),
Scale=ToggleButtons(options=['Linear', 'Log'])
)
| mit |
HaydenFaulkner/bottom-up-attention | tools/train_svms.py | 16 | 13480 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| mit |
xiaohan2012/lst | twitter_util.py | 1 | 1975 | import itertools
import pandas as pd
import langdetect
from langdetect import detect
from merge_similar_messages import merge_messages
from datetime import timedelta
def remove_mentions_and_urls(df):
def aux(r):
body = r['body'].lower()
mentions = map(lambda m: '@' + m, r['mentions'])
for s in itertools.chain(mentions, r['urls']):
body = body.replace(s.lower(), '')
return body
df['body'] = df['body'].map(lambda s: s.lower())
df['body'] = df[['body', 'mentions', 'urls']].apply(
aux,
axis=1
)
return df
def detect_lan(msg):
try:
return detect(msg)
except langdetect.lang_detect_exception.LangDetectException:
return ''
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', required=True)
parser.add_argument('--hashtag_ban')
args = parser.parse_args()
try:
df = pd.read_json('data/{}/interactions.json'.format(args.dataset))
except (ValueError, IOError):
df = pd.read_pickle('data/{}/interactions.pkl'.format(args.dataset))
df = df.drop_duplicates(subset=['message_id'])
df['hashtags'] = df['hashtags'].apply(
lambda hs: list(set(map(lambda s: s.lower(), hs)))
)
if args.hashtag_ban:
df['hashtags'] = df['hashtags'].apply(
lambda hs: filter(lambda h: h != args.hashtag_ban, hs)
)
df = remove_mentions_and_urls(df)
df = df[df['body'].map(len) > 10] # filter short body
# df = df[df['body'].map(detect_lan) == 'en'] # non english
df = merge_messages(df,
timedelta(minutes=30),
50,
'datetime')
# df.to_json('data/{}/interactions_new.json'.format(args.dataset),
# orient='records')
df.to_pickle('data/{}/interactions.pkl'.format(args.dataset))
if __name__ == '__main__':
main()
| mit |
Aerotenna/Firmware | Tools/ecl_ekf/batch_process_metadata_ekf.py | 3 | 31686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
"""
Performs a composite analysis of ekf log analysis meta data for all .ulg.csv files in the specified directory
Generates and saves histogram plots for the meta data in population_data.pdf
Generates and saves population summary data in population_data.csv
"""
parser = argparse.ArgumentParser(description='Perform a composite analysis of ekf log analysis meta data for all .ulg.csv files in the specified directory')
parser.add_argument("directory_path")
def is_valid_directory(parser, arg):
if os.path.isdir(arg):
# Directory exists so return the directory
return arg
else:
parser.error('The directory {} does not exist'.format(arg))
args = parser.parse_args()
metadata_directory = args.directory_path
# Run the metadata analsyis tool to generate population statistics
# Loop through the csv files in the directory and load the metadata into a nested dictionary
print("\n"+"analysing all .ulog.csv files in "+metadata_directory)
population_data = {}
for filename in os.listdir(metadata_directory):
if filename.endswith(".mdat.csv"):
print("loading "+filename)
# get the dictionary of fail and warning test thresholds from a csv file
file = open(metadata_directory+"/"+filename)
single_log_data = { } # meta data dictionary for a single log
for line in file:
x = line.split(",")
a=x[0]
b=x[1]
c=x[2]
try:
single_log_data[a]=float(b)
except:
single_log_data[a]=b
file.close()
population_data[filename]=single_log_data
# # print out the check levels
# print('\n'+'The following metadata loaded from '+filename+' were used'+'\n')
# val = population_data.get(filename, {}).get('imu_hfdang_mean')
# print(val)
# Open pdf file for plotting
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "population_data.pdf"
pp = PdfPages(metadata_directory+"/"+output_plot_filename)
# get statistics for the population
population_results = {
'master_warning_pct':[float('NaN'),'Percentage of logs with warnings'],
'master_fail_pct':[float('NaN'),'Percentage of logs with fails'],
'mag_warning_pct':[float('NaN'),'Percentage of logs with magnetometer sensor warnings'],
'mag_fail_pct':[float('NaN'),'Percentage of logs with magnetometer sensor fails'],
'yaw_warning_pct':[float('NaN'),'Percentage of logs with yaw sensor warnings'],
'yaw_fail_pct':[float('NaN'),'Percentage of logs with yaw sensor fails'],
'vel_warning_pct':[float('NaN'),'Percentage of logs with velocity sensor warnings'],
'vel_fail_pct':[float('NaN'),'Percentage of logs with velocity sensor fails'],
'pos_warning_pct':[float('NaN'),'Percentage of logs with position sensor warnings'],
'pos_fail_pct':[float('NaN'),'Percentage of logs with position sensor fails'],
'hgt_warning_pct':[float('NaN'),'Percentage of logs with height sensor warnings'],
'hgt_fail_pct':[float('NaN'),'Percentage of logs with height sensor fails'],
'hagl_warning_pct':[float('NaN'),'Percentage of logs with height above ground sensor warnings'],
'hagl_fail_pct':[float('NaN'),'Percentage of logs with height above ground sensor fails'],
'tas_warning_pct':[float('NaN'),'Percentage of logs with airspeed sensor warnings'],
'tas_fail_pct':[float('NaN'),'Percentage of logs with airspeed ground sensor fails'],
'mag_test_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the magnetic field sensor innovation consistency test ratio'],
'mag_test_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the magnetic field sensor innovation consistency test ratio'],
'vel_test_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the velocity sensor innovation consistency test ratio'],
'vel_test_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the velocity sensor innovation consistency test ratio'],
'pos_test_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the position sensor innovation consistency test ratio'],
'pos_test_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the position sensor innovation consistency test ratio'],
'hgt_test_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the height sensor innovation consistency test ratio'],
'hgt_test_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the height sensor innovation consistency test ratio'],
'tas_test_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the airspeed sensor innovation consistency test ratio'],
'tas_test_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the airspeed sensor innovation consistency test ratio'],
'hagl_test_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the height above ground sensor innovation consistency test ratio'],
'hagl_test_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the height above ground sensor innovation consistency test ratio'],
'ofx_fail_pct_avg':[float('NaN'),'The mean percentage of innovation test fails for the X axis optical flow sensor'],
'ofy_fail_pct_avg':[float('NaN'),'The mean percentage of innovation test fails for the Y axis optical flow sensor'],
'imu_coning_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the IMU delta angle coning vibration level (mrad)'],
'imu_coning_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the IMU delta angle coning vibration level (mrad)'],
'imu_hfdang_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the IMU high frequency delta angle vibration level (mrad)'],
'imu_hfdang_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the IMU delta high frequency delta angle vibration level (mrad)'],
'imu_hfdvel_max_avg':[float('NaN'),'The mean of the maximum in-flight values of the IMU high frequency delta velocity vibration level (m/s)'],
'imu_hfdvel_mean_avg':[float('NaN'),'The mean of the mean in-flight value of the IMU delta high frequency delta velocity vibration level (m/s)'],
'obs_ang_median_avg':[float('NaN'),'The mean of the median in-flight value of the output observer angular tracking error magnitude (mrad)'],
'obs_vel_median_avg':[float('NaN'),'The mean of the median in-flight value of the output observer velocity tracking error magnitude (m/s)'],
'obs_pos_median_avg':[float('NaN'),'The mean of the median in-flight value of the output observer position tracking error magnitude (m)'],
}
# get population summary statistics
found_keys = population_data.keys()
# master status
result = [population_data[k].get('master_status') for k in found_keys]
population_results['master_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['master_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# magnetometer sensor
result = [population_data[k].get('mag_sensor_status') for k in found_keys]
population_results['mag_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['mag_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# yaw sensor
result = [population_data[k].get('yaw_sensor_status') for k in found_keys]
population_results['yaw_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['yaw_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# velocity sensor
result = [population_data[k].get('vel_sensor_status') for k in found_keys]
population_results['vel_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['vel_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# position sensor
result = [population_data[k].get('pos_sensor_status') for k in found_keys]
population_results['pos_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['pos_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# height sensor
result = [population_data[k].get('hgt_sensor_status') for k in found_keys]
population_results['hgt_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['hgt_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# height above ground sensor
result = [population_data[k].get('hagl_sensor_status') for k in found_keys]
population_results['hagl_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['hagl_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# height above ground sensor
result = [population_data[k].get('tas_sensor_status') for k in found_keys]
population_results['tas_warning_pct'][0] = 100.0 * result.count('Warning') / len(result)
population_results['tas_fail_pct'][0] = 100.0 * result.count('Fail') / len(result)
# Mean and max innovation test levels
# Magnetometer
temp = np.asarray([population_data[k].get('mag_test_max') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('mag_test_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['mag_test_max_avg'][0] = np.mean(result1)
population_results['mag_test_mean_avg'][0] = np.mean(result2)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - Magnetometer Innovation Test Ratio Maximum")
plt.xlabel("mag_test_max")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - Magnetometer Innovation Test Ratio Mean")
plt.xlabel("mag_test_mean")
plt.ylabel("Frequency")
pp.savefig()
plt.close(1)
# Velocity Sensor (GPS)
temp = np.asarray([population_data[k].get('vel_test_max') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('vel_test_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['vel_test_max_avg'][0] = np.mean(result1)
population_results['vel_test_mean_avg'][0] = np.mean(result2)
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - Velocity Innovation Test Ratio Maximum")
plt.xlabel("vel_test_max")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - Velocity Innovation Test Ratio Mean")
plt.xlabel("vel_test_mean")
plt.ylabel("Frequency")
pp.savefig()
plt.close(2)
# Position Sensor (GPS or external vision)
temp = np.asarray([population_data[k].get('pos_test_max') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('pos_test_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['pos_test_max_avg'][0] = np.mean(result1)
population_results['pos_test_mean_avg'][0] = np.mean(result2)
plt.figure(3,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - Position Innovation Test Ratio Maximum")
plt.xlabel("pos_test_max")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - Position Innovation Test Ratio Mean")
plt.xlabel("pos_test_mean")
plt.ylabel("Frequency")
pp.savefig()
plt.close(3)
# Height Sensor
temp = np.asarray([population_data[k].get('hgt_test_max') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('hgt_test_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['hgt_test_max_avg'][0] = np.mean(result1)
population_results['hgt_test_mean_avg'][0] = np.mean(result2)
plt.figure(4,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - Height Innovation Test Ratio Maximum")
plt.xlabel("pos_test_max")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - Height Innovation Test Ratio Mean")
plt.xlabel("pos_test_mean")
plt.ylabel("Frequency")
pp.savefig()
plt.close(4)
# Airspeed Sensor
temp = np.asarray([population_data[k].get('tas_test_max') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('tas_test_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['tas_test_max_avg'][0] = np.mean(result1)
population_results['tas_test_mean_avg'][0] = np.mean(result2)
plt.figure(5,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - Airspeed Innovation Test Ratio Maximum")
plt.xlabel("tas_test_max")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - Airspeed Innovation Test Ratio Mean")
plt.xlabel("tas_test_mean")
plt.ylabel("Frequency")
pp.savefig()
plt.close(5)
# Height Above Ground Sensor
temp = np.asarray([population_data[k].get('hagl_test_max') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('hagl_test_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['hagl_test_max_avg'][0] = np.mean(result1)
population_results['hagl_test_mean_avg'][0] = np.mean(result2)
plt.figure(6,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - HAGL Innovation Test Ratio Maximum")
plt.xlabel("hagl_test_max")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - HAGL Innovation Test Ratio Mean")
plt.xlabel("hagl_test_mean")
plt.ylabel("Frequency")
pp.savefig()
plt.close(6)
# Optical Flow Sensor
temp = np.asarray([population_data[k].get('ofx_fail_percentage') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('ofy_fail_percentage') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['ofx_fail_pct_avg'][0] = np.mean(result1)
population_results['ofy_fail_pct_avg'][0] = np.mean(result2)
plt.figure(7,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - Optical Flow X Axis Fail Percentage")
plt.xlabel("ofx_fail_percentage")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - Optical Flow Y Axis Fail Percentage")
plt.xlabel("ofy_fail_percentage")
plt.ylabel("Frequency")
pp.savefig()
plt.close(7)
# IMU coning vibration levels
temp = np.asarray([population_data[k].get('imu_coning_peak') for k in found_keys])
result1 = 1000.0 * temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('imu_coning_mean') for k in found_keys])
result2 = 1000.0 * temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['imu_coning_max_avg'][0] = np.mean(result1)
population_results['imu_coning_mean_avg'][0] = np.mean(result2)
plt.figure(8,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - IMU Coning Vibration Peak")
plt.xlabel("imu_coning_max (mrad)")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - IMU Coning Vibration Mean")
plt.xlabel("imu_coning_mean (mrad)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(8)
# IMU high frequency delta angle vibration levels
temp = np.asarray([population_data[k].get('imu_hfdang_peak') for k in found_keys])
result1 = 1000.0 * temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('imu_hfdang_mean') for k in found_keys])
result2 = 1000.0 * temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['imu_hfdang_max_avg'][0] = np.mean(result1)
population_results['imu_hfdang_mean_avg'][0] = np.mean(result2)
plt.figure(9,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - IMU HF Delta Angle Vibration Peak")
plt.xlabel("imu_hfdang_max (mrad)")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - IMU HF Delta Angle Vibration Mean")
plt.xlabel("imu_hfdang_mean (mrad)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(9)
# IMU high frequency delta velocity vibration levels
temp = np.asarray([population_data[k].get('imu_hfdvel_peak') for k in found_keys])
result1 = temp[np.isfinite(temp)]
temp = np.asarray([population_data[k].get('imu_hfdvel_mean') for k in found_keys])
result2 = temp[np.isfinite(temp)]
if (len(result1) > 0 and len(result2) > 0):
population_results['imu_hfdvel_max_avg'][0] = np.mean(result1)
population_results['imu_hfdvel_mean_avg'][0] = np.mean(result2)
plt.figure(10,figsize=(20,13))
plt.subplot(2,1,1)
plt.hist(result1)
plt.title("Gaussian Histogram - IMU HF Delta Velocity Vibration Peak")
plt.xlabel("imu_hfdvel_max (m/s)")
plt.ylabel("Frequency")
plt.subplot(2,1,2)
plt.hist(result2)
plt.title("Gaussian Histogram - IMU HF Delta Velocity Vibration Mean")
plt.xlabel("imu_hfdvel_mean (m/s)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(10)
# Output Observer Angular Tracking
temp = np.asarray([population_data[k].get('output_obs_ang_err_median') for k in found_keys])
result = 1000.0 * temp[np.isfinite(temp)]
if (len(result) > 0):
population_results['obs_ang_median_avg'][0] = np.mean(result)
plt.figure(11,figsize=(20,13))
plt.hist(result)
plt.title("Gaussian Histogram - Output Observer Angular Tracking Error Median")
plt.xlabel("output_obs_ang_err_median (mrad)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(11)
# Output Observer Velocity Tracking
temp = np.asarray([population_data[k].get('output_obs_vel_err_median') for k in found_keys])
result = temp[np.isfinite(temp)]
if (len(result) > 0):
population_results['obs_vel_median_avg'][0] = np.mean(result)
plt.figure(12,figsize=(20,13))
plt.hist(result)
plt.title("Gaussian Histogram - Output Observer Velocity Tracking Error Median")
plt.xlabel("output_obs_ang_err_median (m/s)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(12)
# Output Observer Position Tracking
temp = np.asarray([population_data[k].get('output_obs_pos_err_median') for k in found_keys])
result = temp[np.isfinite(temp)]
if (len(result) > 0):
population_results['obs_pos_median_avg'][0] = np.mean(result)
plt.figure(13,figsize=(20,13))
plt.hist(result)
plt.title("Gaussian Histogram - Output Observer Position Tracking Error Median")
plt.xlabel("output_obs_ang_err_median (m)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(13)
# IMU delta angle bias
temp = np.asarray([population_data[k].get('imu_dang_bias_median') for k in found_keys])
result = temp[np.isfinite(temp)]
if (len(result) > 0):
plt.figure(14,figsize=(20,13))
plt.hist(result)
plt.title("Gaussian Histogram - IMU Delta Angle Bias Median")
plt.xlabel("imu_dang_bias_median (rad)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(14)
# IMU delta velocity bias
temp = np.asarray([population_data[k].get('imu_dvel_bias_median') for k in found_keys])
result = temp[np.isfinite(temp)]
if (len(result) > 0):
plt.figure(15,figsize=(20,13))
plt.hist(result)
plt.title("Gaussian Histogram - IMU Delta Velocity Bias Median")
plt.xlabel("imu_dvel_bias_median (m/s)")
plt.ylabel("Frequency")
pp.savefig()
plt.close(15)
# close the pdf file
pp.close()
print('Population summary plots saved in population_data.pdf')
# don't display to screen
#plt.show()
# close all figures
plt.close("all")
# write metadata to a .csv file
population_results_filename = metadata_directory + "/population_data.csv"
file = open(population_results_filename,"w")
file.write("name,value,description\n")
# loop through the dictionary and write each entry on a separate row, with data comma separated
# save data in alphabetical order
key_list = list(population_results.keys())
key_list.sort()
for key in key_list:
file.write(key+","+str(population_results[key][0])+","+population_results[key][1]+"\n")
file.close()
print('Population summary data saved in population_data.csv')
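# Illustrative helper (an addition, not part of the original tooling): read the
# population_data.csv written above back into a pandas DataFrame for further analysis.
def load_population_results(csv_path):
    import pandas as pd  # local import so this optional helper has no side effects
    return pd.read_csv(csv_path, index_col='name')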
single_log_results = {
'filter_faults_max':[float('NaN'),'Largest recorded value of the filter internal fault bitmask. Should always be zero.'],
'hagl_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the height above ground sensor innovation consistency test.'],
'hagl_percentage_amber':[float('NaN'),'The percentage of in-flight height above ground sensor innovation consistency test values > 0.5.'],
'hagl_percentage_red':[float('NaN'),'The percentage of in-flight height above ground sensor innovation consistency test values > 1.0.'],
    'hagl_sensor_status':['Pass','Height above ground sensor check summary. This sensor data is normally sourced from a rangefinder sensor. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'hagl_test_max':[float('NaN'),'The maximum in-flight value of the height above ground sensor innovation consistency test ratio.'],
'hagl_test_mean':[float('NaN'),'The mean in-flight value of the height above ground sensor innovation consistency test ratio.'],
'hgt_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the height sensor innovation consistency test.'],
'hgt_percentage_amber':[float('NaN'),'The percentage of in-flight height sensor innovation consistency test values > 0.5.'],
'hgt_percentage_red':[float('NaN'),'The percentage of in-flight height sensor innovation consistency test values > 1.0.'],
    'hgt_sensor_status':['Pass','Height sensor check summary. This sensor data can be sourced from either Baro, GPS, range finder or external vision system. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'hgt_test_max':[float('NaN'),'The maximum in-flight value of the height sensor innovation consistency test ratio.'],
'hgt_test_mean':[float('NaN'),'The mean in-flight value of the height sensor innovation consistency test ratio.'],
'imu_coning_mean':[float('NaN'),'Mean in-flight value of the IMU delta angle coning vibration metric (rad)'],
'imu_coning_peak':[float('NaN'),'Peak in-flight value of the IMU delta angle coning vibration metric (rad)'],
'imu_hfdang_mean':[float('NaN'),'Mean in-flight value of the IMU delta angle high frequency vibration metric (rad)'],
'imu_hfdang_peak':[float('NaN'),'Peak in-flight value of the IMU delta angle high frequency vibration metric (rad)'],
'imu_hfdvel_mean':[float('NaN'),'Mean in-flight value of the IMU delta velocity high frequency vibration metric (m/s)'],
'imu_hfdvel_peak':[float('NaN'),'Peak in-flight value of the IMU delta velocity high frequency vibration metric (m/s)'],
    'imu_sensor_status':['Pass','IMU sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
    'in_air_transition_time':[float('NaN'),'The time in seconds measured from startup that the EKF transitioned into in-air mode. Set to a nan if a transition event is not detected.'],
'mag_percentage_amber':[float('NaN'),'The percentage of in-flight consolidated magnetic field sensor innovation consistency test values > 0.5.'],
'mag_percentage_red':[float('NaN'),'The percentage of in-flight consolidated magnetic field sensor innovation consistency test values > 1.0.'],
    'mag_sensor_status':['Pass','Magnetometer sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'mag_test_max':[float('NaN'),'The maximum in-flight value of the magnetic field sensor innovation consistency test ratio.'],
'mag_test_mean':[float('NaN'),'The mean in-flight value of the magnetic field sensor innovation consistency test ratio.'],
'magx_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the X-axis magnetic field sensor innovation consistency test.'],
'magy_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the Y-axis magnetic field sensor innovation consistency test.'],
'magz_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the Z-axis magnetic field sensor innovation consistency test.'],
    'master_status':['Pass','Master check status which can be either Pass, Warning or Fail. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'ofx_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the optical flow sensor X-axis innovation consistency test.'],
'ofy_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the optical flow sensor Y-axis innovation consistency test.'],
'on_ground_transition_time':[float('NaN'),'The time in seconds measured from startup that the EKF transitioned out of in-air mode. Set to a nan if a transition event is not detected.'],
'output_obs_ang_err_median':[float('NaN'),'Median in-flight value of the output observer angular error (rad)'],
'output_obs_pos_err_median':[float('NaN'),'Median in-flight value of the output observer position error (m)'],
'output_obs_vel_err_median':[float('NaN'),'Median in-flight value of the output observer velocity error (m/s)'],
    'pos_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the position sensor consolidated innovation consistency test.'],
'pos_percentage_amber':[float('NaN'),'The percentage of in-flight position sensor consolidated innovation consistency test values > 0.5.'],
'pos_percentage_red':[float('NaN'),'The percentage of in-flight position sensor consolidated innovation consistency test values > 1.0.'],
    'pos_sensor_status':['Pass','Position sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'pos_test_max':[float('NaN'),'The maximum in-flight value of the position sensor consolidated innovation consistency test ratio.'],
'pos_test_mean':[float('NaN'),'The mean in-flight value of the position sensor consolidated innovation consistency test ratio.'],
'tas_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the airspeed sensor innovation consistency test.'],
'tas_percentage_amber':[float('NaN'),'The percentage of in-flight airspeed sensor innovation consistency test values > 0.5.'],
'tas_percentage_red':[float('NaN'),'The percentage of in-flight airspeed sensor innovation consistency test values > 1.0.'],
    'tas_sensor_status':['Pass','Airspeed sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'tas_test_max':[float('NaN'),'The maximum in-flight value of the airspeed sensor innovation consistency test ratio.'],
'tas_test_mean':[float('NaN'),'The mean in-flight value of the airspeed sensor innovation consistency test ratio.'],
'tilt_align_time':[float('NaN'),'The time in seconds measured from startup that the EKF completed the tilt alignment. A nan value indicates that the alignment had completed before logging started or alignment did not complete.'],
'vel_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the velocity sensor consolidated innovation consistency test.'],
'vel_percentage_amber':[float('NaN'),'The percentage of in-flight velocity sensor consolidated innovation consistency test values > 0.5.'],
'vel_percentage_red':[float('NaN'),'The percentage of in-flight velocity sensor consolidated innovation consistency test values > 1.0.'],
    'vel_sensor_status':['Pass','Velocity sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'vel_test_max':[float('NaN'),'The maximum in-flight value of the velocity sensor consolidated innovation consistency test ratio.'],
'vel_test_mean':[float('NaN'),'The mean in-flight value of the velocity sensor consolidated innovation consistency test ratio.'],
'yaw_align_time':[float('NaN'),'The time in seconds measured from startup that the EKF completed the yaw alignment.'],
'yaw_fail_percentage':[float('NaN'),'The percentage of in-flight recorded failure events for the yaw sensor innovation consistency test.'],
    'yaw_sensor_status':['Pass','Yaw sensor check summary. This sensor data can be sourced from the magnetometer or an external vision system. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected, but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
}
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 37 | 11979 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
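# (solver, shrinkage) combinations exercised by test_lda_predict below; shrinkage is only
# supported by the 'lsqr' and 'eigen' solvers, which that test also asserts.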
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
    # Test if the sum of the normalized eigenvalues equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
    # Assert that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
dhwang99/statistics_introduction | probility/random_walk.py | 1 | 2204 | # encoding: utf8
import numpy as np
import matplotlib.pyplot as plt
import pdb
'''
Random walk.
Used a lot as a toy model in stock-market prediction.
example:
    walk one unit to the left with probability p, to the right with (1-p)
    Y = sum(Xi), where Xi is the random variable for a single step
    f(x) = p,     x = -1, go left
           1 - p, x = 1,  go right
    E(Xi)   = -1*p + 1*(1-p) = 1 - 2p
    E(Xi^2) = 1*p + 1*(1-p)  = 1
    E(Xi)^2 = 1 - 4p + 4p^2
    V(Xi)   = E(Xi^2) - E(Xi)^2 = 4p(1-p)
    E(Y) = n(1-2p)
    V(Y) = 4np(1-p)
Judging from the plots, for p >= 0.3 the outcomes of repeated runs vary a lot -
there is too much uncertainty. Compare with the plots of the last 100 steps.
In this example, with p = 0.5 the cumulative behaviour is essentially unpredictable.
A small Monte-Carlo sanity check of E(Y) and V(Y) is sketched after random_walk() below.
'''
'''
n: number of steps
p: probability of a step to the left
return (expectation, variance) of the final position Y
'''
def EV_for_random_walk(n,p):
E = n*(1-2*p)
V = 4*n*p*(1-p)
return (E,V)
def random_walk(n,p):
pos = 0.
pos_list = []
for i in xrange(1, n+1):
rd_num = np.random.random()
if rd_num < p:
pos -= 1
else:
pos += 1
pos_list.append(pos)
return pos_list
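# Illustrative sanity check (an addition, not in the original script): compare the analytic
# E(Y) and V(Y) from the docstring above with Monte-Carlo estimates of the final position.
def check_EV_by_simulation(n=1000, p=0.3, trials=500):
    finals = np.array([random_walk(n, p)[-1] for _ in range(trials)])
    E, V = EV_for_random_walk(n, p)
    # returns ((analytic E, analytic V), (simulated mean, simulated variance))
    return (E, V), (finals.mean(), finals.var())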
def plot_random_walk(n, p, last_steps, repeat_times):
E_list = []
V_list = []
std1_list = []
std2_list = []
steps = range(1, n+1)
colors = ['b', 'g', 'y', 'k', 'c', 'm', 'y']
for i in steps:
E,V = EV_for_random_walk(i, p)
E_list.append(E)
V_list.append(V)
std1_list = np.sqrt(V_list) + E_list
std2_list = -np.sqrt(V_list) + E_list
plt.clf()
for i in xrange(repeat_times):
pos_list = random_walk(n, p)
color = colors[i%len(colors)]
#pdb.set_trace()
plt.plot(steps[-last_steps:], pos_list[-last_steps:], color=color, lw=0.5)
lw=2
plt.plot(steps[-last_steps:], E_list[-last_steps:], color='b', lw=lw)
plt.plot(steps[-last_steps:], std1_list[-last_steps:], color='r', lw=lw)
plt.plot(steps[-last_steps:], std2_list[-last_steps:], color='r', lw=lw)
plt.savefig('images/random_walk/laststep_%s_p%s.png' % (last_steps, p), format='png')
if __name__ == "__main__":
for i in range(1,6):
plot_random_walk(10000, 0.1 * i, 10000, 5)
plot_random_walk(10000, 0.1 * i, 100, 5)
| gpl-3.0 |
rkmaddox/mne-python | examples/time_frequency/time_frequency_global_field_power.py | 10 | 5307 | """
.. _ex-time-freq-global-field-power:
===========================================================
Explore event-related dynamics for specific frequency bands
===========================================================
The objective is to show you how to explore spectrally localized
effects. For this purpose we adapt the method described in
:footcite:`HariSalmelin1997` and use it on the somato dataset.
The idea is to track the band-limited temporal evolution
of spatial patterns by using the :term:`global field power` (GFP).
We first bandpass filter the signals and then apply a Hilbert transform. To
reveal oscillatory activity the evoked response is then subtracted from every
single trial. Finally, we rectify the signals prior to averaging across trials
by taking the magnitude of the Hilbert transform.
Then the :term:`GFP` is computed as described in
:footcite:`EngemannGramfort2015`, using the sum of the
squares but without normalization by the rank.
Baselining is subsequently applied to make the :term:`GFP` comparable
between frequencies.
The procedure is then repeated for each frequency band of interest and
all :term:`GFPs<GFP>` are visualized. To estimate uncertainty, non-parametric
confidence intervals are computed as described in :footcite:`EfronHastie2016`
across channels.
The advantage of this method over summarizing the Space x Time x Frequency
output of a Morlet Wavelet in frequency bands is relative speed and, more
importantly, the clear-cut comparability of the spectral decomposition (the
same type of filter is used across all bands).
We will use this dataset: :ref:`somato-dataset`
References
----------
.. footbibliography::
""" # noqa: E501
# Authors: Denis A. Engemann <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import somato
from mne.baseline import rescale
from mne.stats import bootstrap_confidence_interval
###############################################################################
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# let's explore some frequency bands
iter_freqs = [
('Theta', 4, 7),
('Alpha', 8, 12),
('Beta', 13, 25),
('Gamma', 30, 45)
]
###############################################################################
# We create average power time courses for each frequency band
# set epoching parameters
event_id, tmin, tmax = 1, -1., 3.
baseline = None
# get the header to extract events
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
frequency_map = list()
for band, fmin, fmax in iter_freqs:
# (re)load the data to save memory
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg='grad', eog=True) # we just look at gradiometers
raw.load_data()
# bandpass filter
raw.filter(fmin, fmax, n_jobs=1, # use more jobs to speed up.
l_trans_bandwidth=1, # make sure filter params are the same
h_trans_bandwidth=1) # in each band and skip "auto" option.
# epoch
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline,
reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
# remove evoked response
epochs.subtract_evoked()
# get analytic signal (envelope)
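    # (the envelope is the magnitude of the analytic signal, i.e. the rectification
    # step described in the module docstring above)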
epochs.apply_hilbert(envelope=True)
frequency_map.append(((band, fmin, fmax), epochs.average()))
del epochs
del raw
###############################################################################
# Now we can compute the Global Field Power
# We can track the emergence of spatial patterns compared to baseline
# for each frequency band, with a bootstrapped confidence interval.
#
# We see dominant responses in the Alpha and Beta bands.
# Helper function for plotting spread
def stat_fun(x):
"""Return sum of squares."""
return np.sum(x ** 2, axis=0)
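# stat_fun mirrors the GFP definition used below: a sum of squares across channels without
# normalization by the rank; it is passed to bootstrap_confidence_interval for the CI.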
# Plot
fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
for ((freq_name, fmin, fmax), average), color, ax in zip(
frequency_map, colors, axes.ravel()[::-1]):
times = average.times * 1e3
gfp = np.sum(average.data ** 2, axis=0)
gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
ax.axhline(0, linestyle='--', color='grey', linewidth=2)
ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0,
stat_fun=stat_fun)
ci_low = rescale(ci_low, average.times, baseline=(None, 0))
ci_up = rescale(ci_up, average.times, baseline=(None, 0))
ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)
ax.grid(True)
ax.set_ylabel('GFP')
ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
xy=(0.95, 0.8),
horizontalalignment='right',
xycoords='axes fraction')
ax.set_xlim(-1000, 3000)
axes.ravel()[-1].set_xlabel('Time [ms]')
| bsd-3-clause |
sssllliang/BuildingMachineLearningSystemsWithPython | ch11/demo_pca.py | 25 | 4333 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
from sklearn import linear_model, decomposition
from sklearn import lda
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
def plot_simple_demo_1():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
good = (x1 > 5) | (x2 > 5)
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
pca = decomposition.PCA(n_components=1)
Xtrans = pca.fit_transform(X)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
print(pca.explained_variance_ratio_)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "pca_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_2():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
good = x1 > x2
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
pca = decomposition.PCA(n_components=1)
Xtrans = pca.fit_transform(X)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
print(pca.explained_variance_ratio_)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "pca_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_lda():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
good = x1 > x2
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
lda_inst = lda.LDA(n_components=1)
Xtrans = lda_inst.fit_transform(X, good)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "lda_demo.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_simple_demo_1()
plot_simple_demo_2()
plot_simple_demo_lda()
| mit |
ypochien/TaiwanStockBSR | GetTWBSR.py | 1 | 8597 | # -*- coding: utf-8 -*-
import re
import urllib2,urllib
import sys
import csv
from datetime import datetime
import os
import threading
import Queue
from time import strftime
from time import sleep
from time import time
from types import *
import pandas as pd
from pandas import Series, DataFrame
# TSE : Taiwan Stock Exchange , 台灣證交所 (listed / main board)
# OTC : Over-the-Counter , 櫃檯中心 (over-the-counter / TPEx board)
# BSR : Buy Sell Report , 分公司買賣進出表 (per-branch buy/sell report)
class ThreadingDownloadBot(threading.Thread):
def __init__(self, pid, queue):
threading.Thread.__init__(self)
self.queue = queue
self.pid = pid
def run(self):
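        # Queue items are either a plain code ("2330") or a "code,retryCount" string;
        # failed downloads are re-queued with an incremented count and dropped after 3 tries.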
while(True):
try:
Code = self.queue.get()
            except Queue.Empty:
#pass
print 'self.queue.Empty'
else:
if self.pid < 5: # fast otc thread
sleep(0.5)
retry = 0
if ',' in Code:
retry = int(Code.split(',')[1])
Code = Code.split(',')[0]
print 'thread: id=%d, code=%s, queue_size=%d, retry=%d' % (self.pid, Code, self.queue.qsize(), retry)
res = self.RunImp(Code)
if res < 0:
retry +=1
if retry >= 3:
                        print 'fail3, ' + Code #print u'%s failed to download after three attempts'%(Code)
else:
retryCode = '%s,%d'%(Code,retry)
#print 'retry: ' + retryCode
                        sleep(1) # pause 1 second on error before putting back into the queue
self.queue.put(retryCode)
self.queue.task_done() #used by consumer thread, tells the queue that task is completed.
class DownloadTSEBot(ThreadingDownloadBot):
def __init__(self,pid,queue):
super(DownloadTSEBot, self).__init__(pid,queue)
self.name = "TSE BSR Download Bot."
def RunImp(self, Code):
# step1. GetMaxPage and POST data
def GetDateAndspPage(Code):
try:
base_url = 'http://bsr.twse.com.tw/bshtm/bsMenu.aspx'
req = urllib2.Request(base_url)
response = urllib2.urlopen(req)
html = response.read()
__VIEWSTATE = re.findall(u'id="__VIEWSTATE" value="(.*)" />',html)[0]
__EVENTVALIDATION = re.findall(u'id="__EVENTVALIDATION" value="(.*)" />',html)[0]
HiddenField_spDate = re.findall(u'id="sp_Date" name="sp_Date" style="display: none;">(.*)</span>',html)[0]
PostDataDict = {'__EVENTTARGET':''
, '__EVENTARGUMENT':''
,'HiddenField_page':'PAGE_BS'
,'txtTASKNO':Code
,'hidTASKNO':Code
,'__VIEWSTATE': __VIEWSTATE
,'__EVENTVALIDATION':__EVENTVALIDATION
,'HiddenField_spDate':HiddenField_spDate
,'btnOK':'%E6%9F%A5%E8%A9%A2'}
postData = urllib.urlencode( PostDataDict)
req = urllib2.Request( base_url , postData)
response = urllib2.urlopen( req)
html = response.read()
sp_ListCount = re.findall(u'<span id="sp_ListCount">(.*)</span>',html)[0]
return (HiddenField_spDate,sp_ListCount)
except Exception, e:
return (None,None)
# step 2. GetRawData
def GetBSRawData(Code,MaxPageNum):
try:
url = 'http://bsr.twse.com.tw/bshtm/bsContent.aspx?StartNumber=%s&FocusIndex=All_%s'%(Code,MaxPageNum)
req = urllib2.Request(url)
response = urllib2.urlopen(req)
html = response.read()
return html #html contains N pages' report xhtml data
except Exception , e:
return None
# step 3. from xhtml data to csv file
def BSRawToCSV(BSRaw):
            # extract the table header row (columns: index, broker, price, buy shares, sell shares)
'''
<tr class='column_title_1'>
<td>序</td>
<td>證券商</td>
<td>成交單價</td>
<td>買進股數</td>
<td>賣出股數</td>
</tr>
'''
title_tr_pattern = u"<tr class='column_title_1'>(.*?)<\/tr>"
title_tr = re.compile(title_tr_pattern)
result_tr = title_tr.findall(BSRaw)
title_td_pattern = u'<td *>\B(.*?)</td>'
title_td = re.compile(title_td_pattern)
result_td = title_td.findall(result_tr[0])
#title = ','.join(title.decode('utf-8').encode('cp950') for title in result_td)
title = ','.join(title for title in result_td)
            # extract the per-branch buy/sell rows
td = '''
<td class='column_value_center'> 1</td>
<td class='column_value_left'> 1233 彰銀台中</td>
<td class='column_value_right'> 8.65</td>
<td class='column_value_right'> 0</td>
<td class='column_value_right'> 20,000</td>
'''
content_tr_pattern = u"<tr class='column_value_price_[23]'>(.*?)<\/tr>"
content_tr = re.compile(content_tr_pattern)
result_tr_content = content_tr.findall(BSRaw)
content_td_pattern = u"<td \S*>(.*?)</td>"
content_td = re.compile(content_td_pattern)
content_list = []
for tr in result_tr_content:
result_td = content_td.findall(tr)
row = ','.join(td.replace(',','').strip() for td in result_td if td.strip()[0] not in ['<','&'])
if len(row) == 0:
continue
#content_list.append(row.decode('utf-8').encode('cp950'))
content_list.append(row)
sortedlist = sorted(content_list,key = lambda s: int(s.split(',')[0]))
            # insert the header row as the first data row
sortedlist.insert(0,title)
return sortedlist
def CSVToFile(CSVData,filename):
with open('BSR/'+filename, 'wb') as csvfile:
content = '\n'.join(row for row in CSVData)
csvfile.write(content)
self.RawBSR = "TSE"
self.date, MaxPageNum = GetDateAndspPage(Code)
if None == MaxPageNum or "" == MaxPageNum:
return -1
BSRawData = GetBSRawData(Code, MaxPageNum)
if None == BSRawData:
return -2
filename = "%s_%s.csv"%(Code,self.date)
CSVData = BSRawToCSV(BSRawData)
CSVToFile(CSVData, filename)
return 0
class DownloadOTCBot(ThreadingDownloadBot):
def __init__(self, pid, queue, otcdate):
super(DownloadOTCBot, self).__init__(pid, queue)
self.name = "OTC BSR Download Bot."
self.date = otcdate
def RunImp(self,Code):
def DownloadOTC(Code,filename,otcdate):
try:
base_url = r'http://www.gretai.org.tw/web/stock/aftertrading/broker_trading/download_ALLCSV.php?curstk={}&stk_date={}'.format(Code,otcdate)
response = urllib2.urlopen(base_url)
#html = response.read()
html = response.read().decode('cp950').encode('utf-8')
except Exception, e:
return -1
with open('BSR/'+filename, 'wb') as csvfile:
content = '\n'.join(row for row in html.split(',,')[1:])
csvfile.write(content)
return 0
#entity in RunImp() of OTC thread
self.RawBSR = "OTC"
#otcDate = getOTCDate(Code) #move this part outside the thread
otcDate = self.date
if otcDate == None:
return -2
filename = "%s_%d%s.csv"%(Code, int(otcDate[0:3])+1911, otcDate[3:])
ret = DownloadOTC(Code, filename, otcDate)
if ret < 0:
return ret
return 0
def getCodeListFromCSV(filename):
CodeList = []
with open(filename,'r') as f:
for row in f:
code = row.split(',')[0]
CodeList.append(code)
return CodeList
def getDateForOTC(CodeDict):
for Code in CodeDict['OTC']:
if Code[0].isdigit():
#print Code
baseUrl = "http://www.gretai.org.tw/web/stock/aftertrading/broker_trading/brokerBS.php"
postDataDict = {
'stk_code' : Code
}
postData = urllib.urlencode( postDataDict)
req = urllib2.Request( baseUrl , postData)
response = urllib2.urlopen(req)
html = response.read()
date_list = re.findall(u'<input type="hidden" id="stk_date" name="stk_date" value=(.*)>',html)
for date in date_list:
print 'get date @' + date
return date
return None
if __name__ == '__main__':
if not os.path.exists('BSR'):
os.makedirs('BSR')
CodeDict = {}
CodeDict['TSE'] = getCodeListFromCSV('TSECode.csv')
CodeDict['OTC'] = getCodeListFromCSV('OTCCode.csv')
print 'TSE:%d, OTC:%d' % (len(CodeDict['TSE']), len(CodeDict['OTC']))
num_thread_otc = 5
num_thread_tse = 5
#preprocessing for otc
otcdate = getDateForOTC(CodeDict)
starttime = time()
OTCqueue = Queue.Queue()
for i in range(num_thread_otc):
t = DownloadOTCBot(i, OTCqueue, otcdate)
t.setDaemon(True) #the thread t is terminated when the main thread ends.
t.start()
for Code in CodeDict['OTC'][:]:
if Code[0].isdigit(): #some stock have character id at tail, like '2833A'
OTCqueue.put(Code)
OTCqueue.join()
endtime = time()
print 'end of otc, ' + 'time: ' + str(endtime - starttime)
starttime = time()
TSEqueue = Queue.Queue()
for i in range(num_thread_tse):
t = DownloadTSEBot(i+num_thread_otc, TSEqueue) #shifted thread id from otc group
t.setDaemon(True)
t.start()
for Code in CodeDict['TSE'][:]:
if Code[0].isdigit():
TSEqueue.put(Code)
# TSEqueue.put('1469')
TSEqueue.join() #Blocks until all items in the queue have been gotten and processed.
endtime = time()
print 'end of tse, ' + 'time: ' + str(endtime - starttime)
| mit |
jakevdp/bokeh | examples/plotting/server/burtin.py | 3 | 4773 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
import pandas as pd
from bokeh.plotting import *
from six.moves import cStringIO as StringIO
from math import log, sqrt
from collections import OrderedDict
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics), skiprows=1, skipinitialspace=True)
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
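# rad() maps MIC values onto the annulus on a log scale: the most effective dose
# (MIC = 0.001) lands on the outer radius, the least effective (MIC = 1000) on the inner radius.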
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
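# each bacterium gets one big wedge; the seven small-angle slots inside it hold the three
# drug bars (at offsets 1-2, 3-4 and 5-6 below) with padding in between.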
output_server("burtin")
hold()
x = np.zeros(len(df))
y = np.zeros(len(df))
figure(plot_width=width, plot_height=height, title="",
tools="pan,wheel_zoom,box_zoom,reset,previewsave",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color=None,
background_fill="#f0e1d2", border_fill="#f0e1d2")
line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index*big_angle
colors = [gram_color[gram] for gram in df.gram]
annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
annular_wedge(
x, y, inner_radius, rad(df.penicillin), -big_angle+angles + 5*small_angle, -big_angle+angles+6*small_angle, color=drug_color['Penicillin'],
)
annular_wedge(
x, y, inner_radius, rad(df.streptomycin), -big_angle+angles + 3*small_angle, -big_angle+angles+4*small_angle, color=drug_color['Streptomycin'],
)
annular_wedge(
x, y, inner_radius, rad(df.neomycin), -big_angle+angles + 1*small_angle, -big_angle+angles+2*small_angle, color=drug_color['Neomycin'],
)
# circular axes and lables
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
circle(x, y, radius=radii, fill_color=None, line_color="white")
text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]], angle=0, text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
annular_wedge(
x, y, inner_radius-10, outer_radius+10, -big_angle+angles, -big_angle+angles, color="black",
)
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
text(xr, yr, df.bacteria, angle=label_angle, text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
text([-30, -30], [-370, -390], text=["Gram-" + x for x in gram_color.keys()], angle=0, text_font_size="7pt", text_align="left", text_baseline="middle")
rect([-40, -40, -40], [18, 0, -18], width=30, height=13, color=list(drug_color.values()))
text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()), angle=0, text_font_size="9pt", text_align="left", text_baseline="middle")
xgrid().grid_line_color = None
ygrid().grid_line_color = None
show()
| bsd-3-clause |
schreiberx/sweet | tests/70_program_swe_plane_spatial_convergence/postprocessing_convergence_test.py | 1 | 3895 | #! /usr/bin/env python3
import sys
import math
from mule_local.JobMule import *
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
groups = [
'runtime.timestepping_method',
'runtime.timestepping_order',
]
tagnames_y = [
'output.error_end_linf_h_pert',
]
j = JobsData(verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(" + "+key)
for tagname_y in tagnames_y:
print("*"*80)
print("Processing tagname "+tagname_y)
print("*"*80)
tagname_x = 'runtime.space_res_physical'
if True:
"""
Plotting format
"""
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
meta_attribute_name = 'runtime.timestepping_order',
# data_filter = data_filter
)
for group_name, group_data in d.get_data_float().items():
print("*"*80)
print("Group: "+group_name)
if len(group_data['meta_values']) == 0:
raise Exception("No data in group found")
prev_value = -1.0
conv = '-'
convergence_order = None
for (x, y, convergence_order_) in zip(group_data['x_values'], group_data['y_values'], group_data['meta_values']):
if prev_value > 0:
#conv = y/prev_value
conv = prev_value/y
elif prev_value == 0:
conv = '[error=0]'
print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv))
prev_value = y
if convergence_order != None:
if convergence_order != convergence_order_:
raise Exception("Convergence order mismatch!!!")
convergence_order = convergence_order_
print("")
print("Testing convergence")
# Cubic for SL with cubic spatial interpolation
if '_sl_' in group_name:
conv_test_range_end = len(group_data['x_values'])
conv_test_range_start = conv_test_range_end-4
target_conv = 16
error_tolerance_convergence = 0.2
else:
                # Convergence order was collected from the runtime.timestepping_order meta values
                conv_test_range_end = len(group_data['x_values'])
                conv_test_range_start = conv_test_range_end-4
                target_conv = pow(2.0, convergence_order)
error_tolerance_convergence = 0.25
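                # If the spatial resolution doubles between samples and error ~ h^order,
                # the ratio of successive errors approaches 2^order (16 for the cubic SL case).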
print(" + range start/end: "+str(conv_test_range_start)+", "+str(conv_test_range_end))
print(" + error_tolerance_convergence: "+str(error_tolerance_convergence))
if len(group_data['meta_values']) < conv_test_range_end:
raise Exception("Not enough samples to run convergence test")
for i in range(len(group_data['meta_values'])):
if group_data['meta_values'][i] != group_data['meta_values'][0]:
raise Exception("FATAL: Different convergence orders in same test")
prev_value = -1.0
conv = '-'
for i in range(conv_test_range_start, conv_test_range_end):
x = group_data['x_values'][i]
y = group_data['y_values'][i]
meta = group_data['meta_values'][i]
if prev_value > 0:
#conv = y/prev_value
conv = prev_value/y
elif prev_value == 0:
conv = '[error=0]'
error_convergence = '-'
if isinstance(conv, float):
error_convergence = abs(conv - target_conv)/target_conv
print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv)+"\terror: "+str(error_convergence))
if error_convergence != '-':
if error_convergence > error_tolerance_convergence:
print("Error: "+str(error_convergence))
raise Exception("Convergence exceeds tolerance of "+str(error_tolerance_convergence))
prev_value = y
print("[OK]")
print("*"*80)
print("Convergence tests successful")
print("*"*80)
| mit |
nmartensen/pandas | pandas/core/base.py | 1 | 39480 | """
Base and utility classes for pandas objects.
"""
import warnings
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.core.dtypes.common import (
is_object_dtype,
is_list_like,
is_scalar,
is_datetimelike)
from pandas.util._validators import validate_bool_kwarg
from pandas.core import common as com
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
from pandas.compat import PYPY
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.common import AbstractMethodError, _maybe_box_datetimelike
from pandas.core.accessor import DirNamesMixin
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
        Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self._freeze()`. Mainly used to prevent the user from setting
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# dict lookup instead of getattr as getattr is false for getter
# which error
if getattr(self, "__frozen", False) and not \
(key in type(self).__dict__ or key == "_cache"):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
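# Illustrative note (not pandas code): a subclass typically calls self._freeze() at the end
# of its __init__; afterwards `obj.new_attr = 1` raises AttributeError unless
# object.__setattr__(obj, 'new_attr', 1) is used explicitly.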
class PandasDelegate(PandasObject):
""" an abstract base class for delegating methods/properties """
@classmethod
def _make_accessor(cls, data):
raise AbstractMethodError("_make_accessor should be implemented"
"by subclass and return an instance"
"of `cls`.")
def _delegate_property_get(self, name, *args, **kwargs):
raise TypeError("You cannot access the "
"property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
"""
add accessors to cls from the delegate class
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
        accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter,
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
class AccessorProperty(object):
"""Descriptor for implementing accessor properties like Series.str
"""
def __init__(self, accessor_cls, construct_accessor=None):
self.accessor_cls = accessor_cls
self.construct_accessor = (construct_accessor or
accessor_cls._make_accessor)
self.__doc__ = accessor_cls.__doc__
def __get__(self, instance, owner=None):
if instance is None:
# this ensures that Series.str.<method> is well defined
return self.accessor_cls
return self.construct_accessor(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
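# --- Editor's illustrative sketch (not part of pandas) -----------------------
# Wiring PandasDelegate and AccessorProperty together; every name below is
# hypothetical. The delegate forwards method calls to a wrapped list, and the
# descriptor exposes it as ``obj.lst``.
def _accessor_wiring_example():
    class _ListAccessor(PandasDelegate):
        def __init__(self, values):
            self._values = values
        @classmethod
        def _make_accessor(cls, data):
            return cls(data.values)
        def _delegate_method(self, name, *args, **kwargs):
            return getattr(self._values, name)(*args, **kwargs)
    # generate _ListAccessor.count, delegating to list.count
    _ListAccessor._add_delegate_accessors(
        delegate=list, accessors=['count'], typ='method')
    class _Host(object):
        lst = AccessorProperty(_ListAccessor)
        def __init__(self, values):
            self.values = values
    return _Host([1, 1, 2]).lst.count(1)    # -> 2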
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
}
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any([isinstance(r, ABCSeries)
for r in compat.itervalues(result)])
def is_any_frame():
            # return a boolean if we have *any* nested frames
return any([isinstance(r, ABCDataFrame)
for r in compat.itervalues(result)])
if isinstance(result, list):
return concat(result, keys=keys, axis=1), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com._get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for col in obj:
try:
colg = self._gotitem(col, ndim=1, subset=obj[col])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
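# --- Editor's note (illustrative, not part of pandas) ------------------------
# Dispatch order implemented by _aggregate above, for an assumed grouped or
# windowed object ``g``:
#   g.agg('sum')          -> _try_aggregate_string_function: own attribute
#                            first, then a numpy function, else ValueError
#   g.agg({'a': 'mean'})  -> per-column _agg_1dim/_agg_2dim, results combined
#                            with concat (dict-of-dict renaming is deprecated)
#   g.agg(['min', 'max']) -> _aggregate_multiple_funcs, concat along axis=1
#   g.agg(np.sum)         -> mapped to the cython 'sum' via _cython_table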
class GroupByMixin(object):
""" provide the groupby facilities to the mixed object """
@staticmethod
def _dispatch(name, *args, **kwargs):
""" dispatch to apply """
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = dict([(attr, getattr(self, attr))
for attr in self._attributes])
self = self.__class__(subset,
groupby=self._groupby[key],
parent=self,
**kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
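# --- Editor's note (illustrative, not part of pandas) ------------------------
# _dispatch above is a method factory: assigning, say,
# ``corr = GroupByMixin._dispatch('corr')`` on a subclass creates a method
# that shallow-copies the object for each group and forwards the call to
# ``corr`` through self._groupby.apply.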
class IndexOpsMixin(object):
    """ common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
return self._values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
return self._values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
return self.values.base
@property
def _values(self):
""" the internal implementation """
return self.values
@property
def empty(self):
return not self.size
def max(self):
""" The maximum value of the object """
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
        return an ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
""" The minimum value of the object """
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
        return an ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self):
return [_maybe_box_datetimelike(x) for x in self._values]
else:
return self._values.tolist()
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return iter(self.tolist())
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isna(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Returns object containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
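    # Editor's note -- assumed illustration of value_counts above:
    #   pd.Series(['a', 'b', 'a']).value_counts()  -> a    2
    #                                                 b    1
    #   normalize=True returns relative frequencies instead of counts, and
    #   bins=n first cuts numeric data into n half-open intervals via pd.cut.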
_shared_docs['unique'] = (
"""
Return unique values in the object. Uniques are returned in order
        of appearance; this does NOT sort. Hash table-based unique.
Parameters
----------
values : 1d array-like
Returns
-------
unique values.
- If the input is an Index, the return is an Index
- If the input is a Categorical dtype, the return is a Categorical
- If the input is a Series/ndarray, the return will be an ndarray
See Also
--------
unique
Index.unique
Series.unique
""")
@Appender(_shared_docs['unique'] % _indexops_doc_kwargs)
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.values)
return v
def factorize(self, sort=False, na_sentinel=-1):
"""
Encode the object as an enumerated type or categorical variable
Parameters
----------
sort : boolean, default False
Sort by values
        na_sentinel : int, default -1
Value to mark "not found"
Returns
-------
labels : the indexer to the original array
uniques : the unique Index
"""
from pandas.core.algorithms import factorize
return factorize(self, sort=sort, na_sentinel=na_sentinel)
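    # Editor's note -- assumed illustration of factorize above:
    #   pd.Index(['b', 'b', 'a']).factorize()
    #       -> (array([0, 0, 1]), Index(['b', 'a'], dtype='object'))
    #   with sort=True the uniques are sorted and the labels renumbered:
    #       -> (array([1, 1, 0]), Index(['a', 'b'], dtype='object'))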
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
        >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk' ])
        >>> x
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'])
array([1])
>>> x.searchsorted(['bread', 'eggs'])
array([1, 4])
>>> x.searchsorted(['bread', 'eggs'], side='right')
array([3, 4]) # eggs before milk
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
_shared_docs['drop_duplicates'] = (
"""Return %(klass)s with duplicate values removed
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
%(inplace)s
Returns
-------
deduplicated : %(klass)s
""")
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
_shared_docs['duplicated'] = (
"""Return boolean %(duplicated)s denoting duplicate values
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : %(duplicated)s
""")
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
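    # Editor's note -- how duplicated/drop_duplicates above relate (assumed
    # example): for pd.Series(['a', 'b', 'a']), duplicated() is
    # [False, False, True] and drop_duplicates() keeps the rows where that
    # mask is False; keep='last' flags the first 'a' instead, and keep=False
    # flags both occurrences.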
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| bsd-3-clause |
TheCentralLimit/TopSecret | src/chis_code.py | 1 | 1237 | # -*- coding: utf-8 -*-
"""
Code written by Chi Nguyen.
"""
from __future__ import division, print_function
from numpy.polynomial import polynomial as P
from os import path
#import matplotlib
import emcee
import corner
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
#matplotlib.use('Agg')
# group modulus
import density as ds
import mcmc
# Reproducible results!
np.random.seed(123)
def chis_code(x_in,y_in,yerr_in,output_directory):
# Transform M_c into log-space.
index = check_nonzero(y_in) # exclude y = 0
x = x_in[index>0]
y = y_in[index>0]
yerr = yerr_in[index>0]
print(len(yerr))
degree = 9 # degree of polynomial
# least square fitting
lam_ls = mcmc.least_square(x,y,yerr,degree,output_directory)
lam_ml = mcmc.maximum_likelihood(x,y,yerr,degree,lam_ls,output_directory)
lam_mcmc = mcmc.MCMC(x,y,yerr,degree,lam_ml,output_directory)
return lam_mcmc
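# Editor's note -- assumed usage sketch (the mcmc module's API is inferred
# from the calls above):
#   lam = chis_code(x, y, yerr, "output/")
# drops bins where y is ~0, fits a degree-9 polynomial by least squares,
# refines it by maximum likelihood, and finally samples the posterior with
# emcee, returning the MCMC parameter estimates.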
def check_nonzero(y_in):
index = (y_in[:]>10**(-15.0)).astype(int)
return index
def yerr_log(y,yerr):
yerr_return = (np.log10(y+yerr) - np.log10(y-yerr))
pl.plot(y,yerr_return)
    pl.show()
return yerr_return
| mit |
ltiao/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
sssllliang/BuildingMachineLearningSystemsWithPython | ch06/01_start.py | 22 | 3955 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script trains multinomial Naive Bayes on the tweet corpus
# to answer two different questions:
# - How well can we distinguish positive from negative tweets?
# - How well can we detect whether a tweet contains sentiment at all?
#
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
def create_ngram_model():
tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
analyzer="word", binary=False)
clf = MultinomialNB()
pipeline = Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
return pipeline
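# Editor's note -- a minimal, self-contained illustration of the pipeline
# above (toy data, not the Sanders corpus):
#   clf = create_ngram_model()
#   clf.fit(["good movie", "bad movie"], [1, 0])
#   clf.predict_proba(["really good"])  # -> class probabilities, shape (1, 2)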
def train_model(clf_factory, X, Y, name="NB ngram", plot=False):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
scores_to_sort = pr_scores
    median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
if plot:
plot_pr(pr_scores[median], name, "01", precisions[median],
recalls[median], label=name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(create_ngram_model, X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
train_model(create_ngram_model, X, Y, name="sent vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(create_ngram_model, X, Y, name="pos vs rest", plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(create_ngram_model, X, Y, name="neg vs rest", plot=True)
print("time spent:", time.time() - start_time)
| mit |
benslice/ggplot | ggplot/components/smoothers.py | 12 | 2576 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from pandas.lib import Timestamp
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
_isdate = lambda x: isinstance(x, Timestamp)
SPAN = 2/3.
ALPHA = 0.05 # significance level for confidence interval
def snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x, y = map(plot_friendly, [x,y])
if _isdate(x[0]):
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(snakify, summary_names))
fittedvalues = df['predicted_value']
predict_mean_se = df['std_error_mean_predict']
predict_mean_ci_low = df['mean_ci_95%_low']
predict_mean_ci_upp = df['mean_ci_95%_upp']
predict_ci_low = df['predict_ci_95%_low']
predict_ci_upp = df['predict_ci_95%_upp']
return (x, fittedvalues.tolist(), predict_mean_ci_low.tolist(),
predict_mean_ci_upp.tolist())
def lowess(x, y, span=SPAN):
    """
    Returns y-values estimated using the lowess function in statsmodels.
    For more see
    statsmodels.nonparametric.smoothers_lowess.lowess
    """
x, y = map(plot_friendly, [x,y])
if _isdate(x[0]):
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y).tolist()
y2 = pd.Series(upper * std + y).tolist()
return (x, y, y1, y2)
def mavg(x,y, window):
"compute moving average"
x, y = map(plot_friendly, [x,y])
if _isdate(x[0]):
x = np.array([i.toordinal() for i in x])
std_err = pd.rolling_std(y, window)
y = pd.rolling_mean(y, window)
y1 = y - std_err
y2 = y + std_err
return (x, y, y1.tolist(), y2.tolist())
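# Editor's note -- the three smoothers above share one return convention
# (assumed example):
#   x, y_hat, lower, upper = lm(df["wt"], df["mpg"])
# where (lower, upper) bound the band shaded around the fitted line;
# lowess() and mavg() return the same kind of 4-tuple.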
| bsd-2-clause |
cpcloud/ibis | ibis/impala/client.py | 1 | 60067 | import io
import operator
import re
import threading
import time
import traceback
import weakref
from collections import deque
from posixpath import join as pjoin
import numpy as np
import pandas as pd
from pkg_resources import parse_version
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.util as util
from ibis.client import Database, DatabaseEntity, Query, SQLClient
from ibis.config import options
from ibis.filesystems import HDFS, WebHDFS
from ibis.impala import ddl, udf
from ibis.impala.compat import HS2Error, ImpylaError, impyla
from ibis.impala.compiler import ImpalaDialect, build_ast
from ibis.sql.compiler import DDL, DML
from ibis.util import log
class ImpalaDatabase(Database):
def create_table(self, table_name, obj=None, **kwargs):
"""
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
)
def list_udfs(self, like=None):
return self.client.list_udfs(
like=self._qualify_like(like), database=self.name
)
def list_udas(self, like=None):
return self.client.list_udas(
like=self._qualify_like(like), database=self.name
)
class ImpalaConnection:
"""
Database connection wrapper
"""
def __init__(self, pool_size=8, database='default', **params):
self.params = params
self.database = database
self.lock = threading.Lock()
self.options = {}
self.max_pool_size = pool_size
self._connections = weakref.WeakSet()
self.connection_pool = deque(maxlen=pool_size)
self.connection_pool_size = 0
def set_options(self, options):
self.options.update(options)
def close(self):
"""
Close all open Impyla sessions
"""
for impyla_connection in self._connections:
impyla_connection.close()
self._connections.clear()
self.connection_pool.clear()
def set_database(self, name):
self.database = name
def disable_codegen(self, disabled=True):
key = 'DISABLE_CODEGEN'
if disabled:
self.options[key] = '1'
elif key in self.options:
del self.options[key]
def execute(self, query):
if isinstance(query, (DDL, DML)):
query = query.compile()
cursor = self._get_cursor()
self.log(query)
try:
cursor.execute(query)
except Exception:
cursor.release()
self.error(
'Exception caused by {}: {}'.format(
query, traceback.format_exc()
)
)
raise
return cursor
def log(self, msg):
log(msg)
def error(self, msg):
self.log(msg)
def fetchall(self, query):
with self.execute(query) as cur:
results = cur.fetchall()
return results
def _get_cursor(self):
try:
cursor = self.connection_pool.popleft()
except IndexError: # deque is empty
if self.connection_pool_size < self.max_pool_size:
return self._new_cursor()
raise com.InternalError('Too many concurrent / hung queries')
else:
if (
cursor.database != self.database
or cursor.options != self.options
):
return self._new_cursor()
cursor.released = False
return cursor
def _new_cursor(self):
params = self.params.copy()
con = impyla.connect(database=self.database, **params)
self._connections.add(con)
# make sure the connection works
cursor = con.cursor(user=params.get('user'), convert_types=True)
cursor.ping()
wrapper = ImpalaCursor(
cursor, self, con, self.database, self.options.copy()
)
wrapper.set_options()
return wrapper
def ping(self):
self._get_cursor()._cursor.ping()
def release(self, cur):
self.connection_pool.append(cur)
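# --- Editor's note (illustrative, not part of ibis) ---------------------------
# Pooling behaviour of ImpalaConnection above, summarised: execute() pops a
# cursor from the deque (or opens a new one while fewer than pool_size exist,
# otherwise raising InternalError), and ImpalaCursor.release() returns it to
# the pool; a pooled cursor whose database or options no longer match is
# discarded and replaced by a fresh one.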
class ImpalaCursor:
def __init__(self, cursor, con, impyla_con, database, options):
self._cursor = cursor
self.con = con
self.impyla_con = impyla_con
self.database = database
self.options = options
self.released = False
self.con.connection_pool_size += 1
def __del__(self):
try:
self._close_cursor()
except Exception:
pass
with self.con.lock:
self.con.connection_pool_size -= 1
def _close_cursor(self):
try:
self._cursor.close()
except HS2Error as e:
# connection was closed elsewhere
already_closed_messages = [
'invalid query handle',
'invalid session',
]
for message in already_closed_messages:
if message in e.args[0].lower():
break
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.release()
def set_options(self):
for k, v in self.options.items():
query = 'SET {} = {!r}'.format(k, v)
self._cursor.execute(query)
@property
def description(self):
return self._cursor.description
def release(self):
if not self.released:
self.con.release(self)
self.released = True
def execute(self, stmt):
self._cursor.execute_async(stmt)
self._wait_synchronous()
def _wait_synchronous(self):
# Wait to finish, but cancel if KeyboardInterrupt
from impala.hiveserver2 import OperationalError
loop_start = time.time()
def _sleep_interval(start_time):
elapsed = time.time() - start_time
if elapsed < 0.05:
return 0.01
elif elapsed < 1.0:
return 0.05
elif elapsed < 10.0:
return 0.1
elif elapsed < 60.0:
return 0.5
return 1.0
cur = self._cursor
try:
while True:
state = cur.status()
if self._cursor._op_state_is_error(state):
raise OperationalError("Operation is in ERROR_STATE")
if not cur._op_state_is_executing(state):
break
time.sleep(_sleep_interval(loop_start))
except KeyboardInterrupt:
print('Canceling query')
self.cancel()
raise
def is_finished(self):
return not self.is_executing()
def is_executing(self):
return self._cursor.is_executing()
def cancel(self):
self._cursor.cancel_operation()
def fetchone(self):
return self._cursor.fetchone()
def fetchall(self, columnar=False):
if columnar:
return self._cursor.fetchcolumnar()
else:
return self._cursor.fetchall()
class ImpalaQuery(Query):
def _fetch(self, cursor):
batches = cursor.fetchall(columnar=True)
names = [x[0] for x in cursor.description]
df = _column_batches_to_dataframe(names, batches)
# Ugly Hack for PY2 to ensure unicode values for string columns
if self.expr is not None:
# in case of metadata queries there is no expr and
# self.schema() would raise an exception
return self.schema().apply_to(df)
return df
def _column_batches_to_dataframe(names, batches):
cols = {}
for name, chunks in zip(names, zip(*[b.columns for b in batches])):
cols[name] = _chunks_to_pandas_array(chunks)
return pd.DataFrame(cols, columns=names)
def _chunks_to_pandas_array(chunks):
total_length = 0
have_nulls = False
for c in chunks:
total_length += len(c)
have_nulls = have_nulls or c.nulls.any()
type_ = chunks[0].data_type
numpy_type = _HS2_TTypeId_to_dtype[type_]
def fill_nonnull(target, chunks):
pos = 0
for c in chunks:
target[pos : pos + len(c)] = c.values
pos += len(c.values)
def fill(target, chunks, na_rep):
pos = 0
for c in chunks:
nulls = c.nulls.copy()
nulls.bytereverse()
bits = np.frombuffer(nulls.tobytes(), dtype='u1')
mask = np.unpackbits(bits).view(np.bool_)
k = len(c)
dest = target[pos : pos + k]
dest[:] = c.values
dest[mask[:k]] = na_rep
pos += k
if have_nulls:
if numpy_type in ('bool', 'datetime64[ns]'):
target = np.empty(total_length, dtype='O')
na_rep = np.nan
elif numpy_type.startswith('int'):
target = np.empty(total_length, dtype='f8')
na_rep = np.nan
else:
target = np.empty(total_length, dtype=numpy_type)
na_rep = np.nan
fill(target, chunks, na_rep)
else:
target = np.empty(total_length, dtype=numpy_type)
fill_nonnull(target, chunks)
return target
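# Editor's note on the helper above: fill() expands each chunk's packed null
# bitmap into a boolean mask aligned with that chunk's values and overwrites
# the masked positions with na_rep, while columns without nulls take the
# fill_nonnull() fast path straight into a typed numpy array.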
_HS2_TTypeId_to_dtype = {
'BOOLEAN': 'bool',
'TINYINT': 'int8',
'SMALLINT': 'int16',
'INT': 'int32',
'BIGINT': 'int64',
'TIMESTAMP': 'datetime64[ns]',
'FLOAT': 'float32',
'DOUBLE': 'float64',
'STRING': 'object',
'DECIMAL': 'object',
'BINARY': 'object',
'VARCHAR': 'object',
'CHAR': 'object',
}
class ImpalaDatabaseTable(ops.DatabaseTable):
pass
class ImpalaTable(ir.TableExpr, DatabaseEntity):
"""
References a physical table in the Impala-Hive metastore
"""
@property
def _qualified_name(self):
return self.op().args[0]
@property
def _unqualified_name(self):
return self._match_name()[1]
@property
def _client(self):
return self.op().source
def _match_name(self):
m = ddl.fully_qualified_re.match(self._qualified_name)
if not m:
raise com.IbisError(
'Cannot determine database name from {0}'.format(
self._qualified_name
)
)
db, quoted, unquoted = m.groups()
return db, quoted or unquoted
@property
def _database(self):
return self._match_name()[0]
def compute_stats(self, incremental=False):
"""
Invoke Impala COMPUTE STATS command to compute column, table, and
partition statistics.
See also ImpalaClient.compute_stats
"""
return self._client.compute_stats(
self._qualified_name, incremental=incremental
)
def invalidate_metadata(self):
self._client.invalidate_metadata(self._qualified_name)
def refresh(self):
self._client.refresh(self._qualified_name)
def metadata(self):
"""
Return parsed results of DESCRIBE FORMATTED statement
Returns
-------
meta : TableMetadata
"""
return self._client.describe_formatted(self._qualified_name)
describe_formatted = metadata
def files(self):
"""
Return results of SHOW FILES statement
"""
return self._client.show_files(self._qualified_name)
def drop(self):
"""
Drop the table from the database
"""
self._client.drop_table_or_view(self._qualified_name)
def truncate(self):
self._client.truncate_table(self._qualified_name)
def insert(
self,
obj=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""
Insert into Impala table. Wraps ImpalaClient.insert
Parameters
----------
obj : TableExpr or pandas DataFrame
overwrite : boolean, default False
If True, will replace existing contents of table
partition : list or dict, optional
For partitioned tables, indicate the partition that's being inserted
into, either with an ordered list of partition keys or a dict of
partition field name to value. For example for the partition
(year=2007, month=7), this can be either (2007, 7) or {'year': 2007,
'month': 7}.
validate : boolean, default True
If True, do more rigorous validation that schema of table being
inserted is compatible with the existing table
Examples
--------
>>> t.insert(table_expr) # doctest: +SKIP
# Completely overwrite contents
>>> t.insert(table_expr, overwrite=True) # doctest: +SKIP
"""
if isinstance(obj, pd.DataFrame):
from ibis.impala.pandas_interop import write_temp_dataframe
writer, expr = write_temp_dataframe(self._client, obj)
else:
expr = obj
if values is not None:
raise NotImplementedError
if validate:
existing_schema = self.schema()
insert_schema = expr.schema()
if not insert_schema.equals(existing_schema):
_validate_compatible(insert_schema, existing_schema)
if partition is not None:
partition_schema = self.partition_schema()
partition_schema_names = frozenset(partition_schema.names)
expr = expr.projection(
[
column
for column in expr.columns
if column not in partition_schema_names
]
)
else:
partition_schema = None
ast = build_ast(expr, ImpalaDialect.make_context())
select = ast.queries[0]
statement = ddl.InsertSelect(
self._qualified_name,
select,
partition=partition,
partition_schema=partition_schema,
overwrite=overwrite,
)
return self._execute(statement)
def load_data(self, path, overwrite=False, partition=None):
"""
Wraps the LOAD DATA DDL statement. Loads data into an Impala table by
physically moving data files.
Parameters
----------
path : string
overwrite : boolean, default False
Overwrite the existing data in the entire table or indicated
partition
partition : dict, optional
If specified, the partition must already exist
Returns
-------
query : ImpalaQuery
"""
if partition is not None:
partition_schema = self.partition_schema()
else:
partition_schema = None
stmt = ddl.LoadData(
self._qualified_name,
path,
partition=partition,
partition_schema=partition_schema,
)
return self._execute(stmt)
@property
def name(self):
return self.op().name
def rename(self, new_name, database=None):
"""
Rename table inside Impala. References to the old table are no longer
valid.
Parameters
----------
new_name : string
database : string
Returns
-------
renamed : ImpalaTable
"""
m = ddl.fully_qualified_re.match(new_name)
if not m and database is None:
database = self._database
statement = ddl.RenameTable(
self._qualified_name, new_name, new_database=database
)
self._client._execute(statement)
op = self.op().change_name(statement.new_qualified_name)
return type(self)(op)
def _execute(self, stmt):
return self._client._execute(stmt)
@property
def is_partitioned(self):
"""
True if the table is partitioned
"""
return self.metadata().is_partitioned
def partition_schema(self):
"""
For partitioned tables, return the schema (names and types) for the
partition columns
Returns
-------
partition_schema : ibis Schema
"""
schema = self.schema()
name_to_type = dict(zip(schema.names, schema.types))
result = self.partitions()
partition_fields = []
for x in result.columns:
if x not in name_to_type:
break
partition_fields.append((x, name_to_type[x]))
pnames, ptypes = zip(*partition_fields)
return sch.Schema(pnames, ptypes)
def add_partition(self, spec, location=None):
"""
Add a new table partition, creating any new directories in HDFS if
necessary.
Partition parameters can be set in a single DDL statement, or you can
use alter_partition to set them after the fact.
Returns
-------
None (for now)
"""
part_schema = self.partition_schema()
stmt = ddl.AddPartition(
self._qualified_name, spec, part_schema, location=location
)
return self._execute(stmt)
def alter(
self,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
"""
Change setting and parameters of the table.
Parameters
----------
location : string, optional
For partitioned tables, you may want the alter_partition function
format : string, optional
tbl_properties : dict, optional
serde_properties : dict, optional
Returns
-------
None (for now)
"""
def _run_ddl(**kwds):
stmt = ddl.AlterTable(self._qualified_name, **kwds)
return self._execute(stmt)
return self._alter_table_helper(
_run_ddl,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
def set_external(self, is_external=True):
"""
Toggle EXTERNAL table property.
"""
self.alter(tbl_properties={'EXTERNAL': is_external})
def alter_partition(
self,
spec,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
"""
Change setting and parameters of an existing partition
Parameters
----------
spec : dict or list
The partition keys for the partition being modified
location : string, optional
format : string, optional
tbl_properties : dict, optional
serde_properties : dict, optional
Returns
-------
None (for now)
"""
part_schema = self.partition_schema()
def _run_ddl(**kwds):
stmt = ddl.AlterPartition(
self._qualified_name, spec, part_schema, **kwds
)
return self._execute(stmt)
return self._alter_table_helper(
_run_ddl,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
def _alter_table_helper(self, f, **alterations):
results = []
for k, v in alterations.items():
if v is None:
continue
result = f(**{k: v})
results.append(result)
return results
def drop_partition(self, spec):
"""
Drop an existing table partition
"""
part_schema = self.partition_schema()
stmt = ddl.DropPartition(self._qualified_name, spec, part_schema)
return self._execute(stmt)
def partitions(self):
"""
Return a pandas.DataFrame giving information about this table's
partitions. Raises an exception if the table is not partitioned.
Returns
-------
partitions : pandas.DataFrame
"""
return self._client.list_partitions(self._qualified_name)
def stats(self):
"""
Return results of SHOW TABLE STATS as a DataFrame. If not partitioned,
contains only one row
Returns
-------
stats : pandas.DataFrame
"""
return self._client.table_stats(self._qualified_name)
def column_stats(self):
"""
Return results of SHOW COLUMN STATS as a pandas DataFrame
Returns
-------
column_stats : pandas.DataFrame
"""
return self._client.column_stats(self._qualified_name)
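# --- Editor's note (illustrative, not part of ibis) ---------------------------
# An assumed end-to-end sketch of the partition helpers above (table name and
# partition keys are hypothetical):
#   t = client.table('events')
#   t.add_partition({'year': 2018, 'month': 1})
#   t.insert(expr, partition={'year': 2018, 'month': 1})
#   t.partitions()                      # DataFrame describing each partition
#   t.drop_partition({'year': 2018, 'month': 1})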
class ImpalaClient(SQLClient):
"""
An Ibis client interface that uses Impala
"""
dialect = ImpalaDialect
database_class = ImpalaDatabase
query_class = ImpalaQuery
table_class = ImpalaDatabaseTable
table_expr_class = ImpalaTable
def __init__(self, con, hdfs_client=None, **params):
import hdfs
self.con = con
if isinstance(hdfs_client, hdfs.Client):
hdfs_client = WebHDFS(hdfs_client)
elif hdfs_client is not None and not isinstance(hdfs_client, HDFS):
raise TypeError(hdfs_client)
self._hdfs = hdfs_client
self._kudu = None
self._temp_objects = weakref.WeakSet()
self._ensure_temp_db_exists()
def _build_ast(self, expr, context):
return build_ast(expr, context)
def _get_hdfs(self):
if self._hdfs is None:
raise com.IbisError(
'No HDFS connection; must pass connection '
'using the hdfs_client argument to '
'ibis.impala.connect'
)
return self._hdfs
def _set_hdfs(self, hdfs):
if not isinstance(hdfs, HDFS):
raise TypeError('must be HDFS instance')
self._hdfs = hdfs
hdfs = property(fget=_get_hdfs, fset=_set_hdfs)
@property
def kudu(self):
from ibis.impala.kudu_support import KuduImpalaInterface
if self._kudu is None:
self._kudu = KuduImpalaInterface(self)
return self._kudu
def close(self):
"""
Close Impala connection and drop any temporary objects
"""
for obj in self._temp_objects:
try:
obj.drop()
except HS2Error:
pass
self.con.close()
def disable_codegen(self, disabled=True):
"""
Turn off or on LLVM codegen in Impala query execution
Parameters
----------
disabled : boolean, default True
To disable codegen, pass with no argument or True. To enable codegen,
pass False
"""
self.con.disable_codegen(disabled)
def log(self, msg):
log(msg)
def _fully_qualified_name(self, name, database):
if ddl._is_fully_qualified(name):
return name
database = database or self.current_database
return '{0}.`{1}`'.format(database, name)
def list_tables(self, like=None, database=None):
"""
List tables in the current (or indicated) database. Like the SHOW
TABLES command in the impala-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
database : string, default None
If not passed, uses the current/default database
Returns
-------
tables : list of strings
"""
statement = 'SHOW TABLES'
if database:
statement += ' IN {0}'.format(database)
if like:
m = ddl.fully_qualified_re.match(like)
if m:
database, quoted, unquoted = m.groups()
like = quoted or unquoted
return self.list_tables(like=like, database=database)
statement += " LIKE '{0}'".format(like)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return result
def _get_list(self, cur):
tuples = cur.fetchall()
return list(map(operator.itemgetter(0), tuples))
def set_database(self, name):
"""
Set the default database scope for client
"""
self.con.set_database(name)
def exists_database(self, name):
"""
Checks if a given database exists
Parameters
----------
name : string
Database name
Returns
-------
if_exists : boolean
"""
return bool(self.list_databases(like=name))
def create_database(self, name, path=None, force=False):
"""
Create a new Impala database
Parameters
----------
name : string
Database name
path : string, default None
HDFS path where to store the database data; otherwise uses Impala
default
"""
if path:
            # explicit mkdir ensures the user owns the dir rather than impala,
# which is easier for manual cleanup, if necessary
self.hdfs.mkdir(path)
statement = ddl.CreateDatabase(name, path=path, can_exist=force)
return self._execute(statement)
def drop_database(self, name, force=False):
"""Drop an Impala database.
Parameters
----------
name : string
Database name
force : bool, default False
If False and there are any tables in this database, raises an
IntegrityError
"""
if not force or self.exists_database(name):
tables = self.list_tables(database=name)
udfs = self.list_udfs(database=name)
udas = self.list_udas(database=name)
else:
tables = []
udfs = []
udas = []
if force:
for table in tables:
self.log('Dropping {0}'.format('{0}.{1}'.format(name, table)))
self.drop_table_or_view(table, database=name)
for func in udfs:
self.log(
'Dropping function {0}({1})'.format(func.name, func.inputs)
)
self.drop_udf(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
for func in udas:
self.log(
'Dropping aggregate function {0}({1})'.format(
func.name, func.inputs
)
)
self.drop_uda(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
else:
if len(tables) > 0 or len(udfs) > 0 or len(udas) > 0:
raise com.IntegrityError(
'Database {0} must be empty before '
'being dropped, or set '
'force=True'.format(name)
)
statement = ddl.DropDatabase(name, must_exist=not force)
return self._execute(statement)
def list_databases(self, like=None):
"""
List databases in the Impala cluster. Like the SHOW DATABASES command
in the impala-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
Returns
-------
databases : list of strings
"""
statement = 'SHOW DATABASES'
if like:
statement += " LIKE '{0}'".format(like)
with self._execute(statement, results=True) as cur:
results = self._get_list(cur)
return results
def get_schema(self, table_name, database=None):
"""
Return a Schema object for the indicated table and database
Parameters
----------
table_name : string
May be fully qualified
database : string, default None
Returns
-------
schema : ibis Schema
"""
qualified_name = self._fully_qualified_name(table_name, database)
query = 'DESCRIBE {}'.format(qualified_name)
# only pull out the first two columns which are names and types
pairs = [row[:2] for row in self.con.fetchall(query)]
names, types = zip(*pairs)
ibis_types = [udf.parse_type(type.lower()) for type in types]
names = [name.lower() for name in names]
return sch.Schema(names, ibis_types)
@property
def client_options(self):
return self.con.options
@property
def version(self):
with self._execute('select version()', results=True) as cur:
raw = self._get_list(cur)[0]
vstring = raw.split()[2]
return parse_version(vstring)
def get_options(self):
"""
Return current query options for the Impala session
"""
query = 'SET'
return dict(row[:2] for row in self.con.fetchall(query))
def set_options(self, options):
self.con.set_options(options)
def reset_options(self):
# Must nuke all cursors
raise NotImplementedError
    def set_compression_codec(self, codec):
        """
        Set the COMPRESSION_CODEC query option for subsequent queries.
        Parameters
        ----------
        codec : {'none', 'gzip', 'snappy'} or None
            None is treated the same as 'none' (i.e. no compression)
        """
if codec is None:
codec = 'none'
else:
codec = codec.lower()
if codec not in ('none', 'gzip', 'snappy'):
raise ValueError('Unknown codec: {0}'.format(codec))
self.set_options({'COMPRESSION_CODEC': codec})
def exists_table(self, name, database=None):
"""
Determine if the indicated table or view exists
Parameters
----------
name : string
database : string, default None
Returns
-------
if_exists : boolean
"""
return len(self.list_tables(like=name, database=database)) > 0
def create_view(self, name, expr, database=None):
"""
Create an Impala view from a table expression
Parameters
----------
name : string
expr : ibis TableExpr
database : string, default None
"""
ast = self._build_ast(expr, ImpalaDialect.make_context())
select = ast.queries[0]
statement = ddl.CreateView(name, select, database=database)
return self._execute(statement)
def drop_view(self, name, database=None, force=False):
"""
Drop an Impala view
Parameters
----------
name : string
database : string, default None
force : boolean, default False
Database may throw exception if table does not exist
"""
statement = ddl.DropView(name, database=database, must_exist=not force)
return self._execute(statement)
def create_table(
self,
table_name,
obj=None,
schema=None,
database=None,
external=False,
force=False,
# HDFS options
format='parquet',
location=None,
partition=None,
like_parquet=None,
):
"""
Create a new table in Impala using an Ibis table expression. This is
currently designed for tables whose data is stored in HDFS (or
eventually other filesystems).
Parameters
----------
table_name : string
obj : TableExpr or pandas.DataFrame, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with obj, creates an empty table with a
particular schema
database : string, default None (optional)
force : boolean, default False
Do not create table if table with indicated name already exists
external : boolean, default False
Create an external table; Impala will not delete the underlying data
when the table is dropped
format : {'parquet'}
location : string, default None
Specify the directory location where Impala reads and writes files
for the table
partition : list of strings
Must pass a schema to use this. Cannot partition from an expression
(create-table-as-select)
like_parquet : string (HDFS path), optional
Can specify in lieu of a schema
Examples
--------
>>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
"""
if like_parquet is not None:
raise NotImplementedError
if obj is not None:
if isinstance(obj, pd.DataFrame):
from ibis.impala.pandas_interop import write_temp_dataframe
writer, to_insert = write_temp_dataframe(self, obj)
else:
to_insert = obj
ast = self._build_ast(to_insert, ImpalaDialect.make_context())
select = ast.queries[0]
statement = ddl.CTAS(
table_name,
select,
database=database,
can_exist=force,
format=format,
external=external,
partition=partition,
path=location,
)
elif schema is not None:
statement = ddl.CreateTableWithSchema(
table_name,
schema,
database=database,
format=format,
can_exist=force,
external=external,
path=location,
partition=partition,
)
else:
raise com.IbisError('Must pass obj or schema')
return self._execute(statement)
def avro_file(
self,
hdfs_dir,
avro_schema,
name=None,
database=None,
external=True,
persist=False,
):
"""
Create a (possibly temporary) table to read a collection of Avro data.
Parameters
----------
hdfs_dir : string
Absolute HDFS path to directory containing avro files
avro_schema : dict
The Avro schema for the data as a Python dict
name : string, default None
database : string, default None
external : boolean, default True
persist : boolean, default False
Returns
-------
avro_table : ImpalaTable
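Examples
--------
A minimal sketch; the HDFS path and Avro schema are illustrative assumptions:
>>> avro_schema = {'type': 'record', 'name': 'my_record', 'fields': [{'name': 'f1', 'type': 'string'}]} # doctest: +SKIP
>>> table = con.avro_file('/path/to/avro_dir', avro_schema) # doctest: +SKIP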
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableAvro(
name, hdfs_dir, avro_schema, database=database, external=external
)
self._execute(stmt)
return self._wrap_new_table(name, database, persist)
def delimited_file(
self,
hdfs_dir,
schema,
name=None,
database=None,
delimiter=',',
na_rep=None,
escapechar=None,
lineterminator=None,
external=True,
persist=False,
):
"""
Interpret delimited text files (CSV / TSV / etc.) as an Ibis table. See
`parquet_file` for more exposition on what happens under the hood.
Parameters
----------
hdfs_dir : string
HDFS directory name containing delimited text files
schema : ibis Schema
name : string, default None
Name for temporary or persistent table; otherwise random one
generated
database : string
Database to create the (possibly temporary) table in
delimiter : length-1 string, default ','
Pass None if there is no delimiter
escapechar : length-1 string
Character used to escape special characters
lineterminator : length-1 string
Character used to delimit lines
external : boolean, default True
Create table as EXTERNAL (data will not be deleted on drop). Note that
if persist=False and external=False, whatever data you reference will
be deleted
persist : boolean, default False
If True, do not delete the table upon garbage collection of ibis
table object
Returns
-------
delimited_table : ImpalaTable
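Examples
--------
A minimal sketch; the HDFS path is an assumption and ``schema`` is an existing ibis Schema:
>>> table = con.delimited_file('/path/to/csv_dir', schema, delimiter=',') # doctest: +SKIP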
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableDelimited(
name,
hdfs_dir,
schema,
database=database,
delimiter=delimiter,
external=external,
na_rep=na_rep,
lineterminator=lineterminator,
escapechar=escapechar,
)
self._execute(stmt)
return self._wrap_new_table(name, database, persist)
def parquet_file(
self,
hdfs_dir,
schema=None,
name=None,
database=None,
external=True,
like_file=None,
like_table=None,
persist=False,
):
"""
Make indicated parquet file in HDFS available as an Ibis table.
The table created can be optionally named and persisted, otherwise a
unique name will be generated. Temporarily, for any non-persistent
external table created by Ibis we will attempt to drop it when the
underlying object is garbage collected (or the Python interpreter shuts
down normally).
Parameters
----------
hdfs_dir : string
Path in HDFS
schema : ibis Schema
If no schema is provided, and neither of the like_* arguments is passed,
one will be inferred from one of the parquet files in the directory.
like_file : string
Absolute path to Parquet file in HDFS to use for schema
definitions. An alternative to having to supply an explicit schema
like_table : string
Fully scoped and escaped string to an Impala table whose schema we
will use for the newly created table.
name : string, optional
random unique name generated otherwise
database : string, optional
Database to create the (possibly temporary) table in
external : boolean, default True
If a table is external, the referenced data will not be deleted when
the table is dropped in Impala. Otherwise (external=False) Impala
takes ownership of the Parquet file.
persist : boolean, default False
Do not drop the table upon Ibis garbage collection / interpreter
shutdown
Returns
-------
parquet_table : ImpalaTable
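Examples
--------
A minimal sketch; the HDFS path is an assumption:
>>> table = con.parquet_file('/path/to/parquet_dir') # doctest: +SKIP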
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
# If no schema provided, need to find some absolute path to a file in
# the HDFS directory
if like_file is None and like_table is None and schema is None:
file_name = self.hdfs._find_any_file(hdfs_dir)
like_file = pjoin(hdfs_dir, file_name)
stmt = ddl.CreateTableParquet(
name,
hdfs_dir,
schema=schema,
database=database,
example_file=like_file,
example_table=like_table,
external=external,
can_exist=False,
)
self._execute(stmt)
return self._wrap_new_table(name, database, persist)
def _get_concrete_table_path(self, name, database, persist=False):
if not persist:
if name is None:
name = '__ibis_tmp_{0}'.format(util.guid())
if database is None:
self._ensure_temp_db_exists()
database = options.impala.temp_db
return name, database
else:
if name is None:
raise com.IbisError('Must pass table name if persist=True')
return name, database
def _ensure_temp_db_exists(self):
# TODO: session memoize to avoid unnecessary `SHOW DATABASES` calls
name, path = options.impala.temp_db, options.impala.temp_hdfs_path
if not self.exists_database(name):
if self._hdfs is None:
print(
'Without an HDFS connection, certain functionality'
' may be disabled'
)
else:
self.create_database(name, path=path, force=True)
def _wrap_new_table(self, name, database, persist):
qualified_name = self._fully_qualified_name(name, database)
if persist:
t = self.table(qualified_name)
else:
schema = self._get_table_schema(qualified_name)
node = ImpalaTemporaryTable(qualified_name, schema, self)
t = self.table_expr_class(node)
# Compute number of rows in table for better default query planning
cardinality = t.count().execute()
set_card = (
"alter table {0} set tblproperties('numRows'='{1}', "
"'STATS_GENERATED_VIA_STATS_TASK' = 'true')".format(
qualified_name, cardinality
)
)
self._execute(set_card)
self._temp_objects.add(t)
return t
def text_file(self, hdfs_path, column_name='value'):
"""
Interpret text data as a table with a single string column.
Parameters
----------
hdfs_path : string
column_name : string, default 'value'
Returns
-------
text_table : TableExpr
"""
pass
def insert(
self,
table_name,
obj=None,
database=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""
Insert into existing table.
See ImpalaTable.insert for other parameters.
Parameters
----------
table_name : string
database : string, default None
Examples
--------
>>> table = 'my_table'
>>> con.insert(table, table_expr) # doctest: +SKIP
# Completely overwrite contents
>>> con.insert(table, table_expr, overwrite=True) # doctest: +SKIP
"""
table = self.table(table_name, database=database)
return table.insert(
obj=obj,
overwrite=overwrite,
partition=partition,
values=values,
validate=validate,
)
def load_data(
self, table_name, path, database=None, overwrite=False, partition=None
):
"""
Wraps the LOAD DATA DDL statement. Loads data into an Impala table by
physically moving data files.
Parameters
----------
table_name : string
path : string
HDFS path to the data files to load
database : string, default None (optional)
overwrite : boolean, default False
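Examples
--------
A minimal sketch; the HDFS path is an assumption:
>>> con.load_data('my_table', '/path/to/staged/files', overwrite=False) # doctest: +SKIP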
"""
table = self.table(table_name, database=database)
return table.load_data(path, overwrite=overwrite, partition=partition)
def drop_table(self, table_name, database=None, force=False):
"""
Drop an Impala table
Parameters
----------
table_name : string
database : string, default None (optional)
force : boolean, default False
Database may throw exception if table does not exist
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> con.drop_table(table, database=db, force=True) # doctest: +SKIP
"""
statement = ddl.DropTable(
table_name, database=database, must_exist=not force
)
self._execute(statement)
def truncate_table(self, table_name, database=None):
"""
Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional)
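Examples
--------
A minimal sketch, assuming an existing client ``con``:
>>> con.truncate_table('my_table', database='operations') # doctest: +SKIP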
"""
statement = ddl.TruncateTable(table_name, database=database)
self._execute(statement)
def drop_table_or_view(self, name, database=None, force=False):
"""
Attempt to drop a relation that may be a view or table
"""
try:
self.drop_table(name, database=database)
except Exception as e:
try:
self.drop_view(name, database=database)
except Exception:
raise e
def cache_table(self, table_name, database=None, pool='default'):
"""
Caches a table in cluster memory in the given pool.
Parameters
----------
table_name : string
database : string, default None (optional)
pool : string, default 'default'
The name of the pool in which to cache the table
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> pool = 'op_4GB_pool'
>>> con.cache_table('my_table', database=db, pool=pool) # noqa: E501 # doctest: +SKIP
"""
statement = ddl.CacheTable(table_name, database=database, pool=pool)
self._execute(statement)
def _get_table_schema(self, tname):
return self.get_schema(tname)
def _get_schema_using_query(self, query):
with self._execute(query, results=True) as cur:
# resets the state of the cursor and closes operation
cur.fetchall()
names, ibis_types = self._adapt_types(cur.description)
# per #321; most Impala tables will be lower case already, but Avro
# data, depending on the version of Impala, might have field names in
# the metastore cased according to the explicit case in the declared
# avro schema. This is very annoying, so it's easier to just conform on
# all lowercase fields from Impala.
names = [x.lower() for x in names]
return sch.Schema(names, ibis_types)
def create_function(self, func, name=None, database=None):
"""
Creates a function within Impala
Parameters
----------
func : ImpalaUDF or ImpalaUDA
Created with wrap_udf or wrap_uda
name : string (optional)
database : string (optional)
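Examples
--------
A minimal sketch; ``my_udf`` stands for a function wrapped with wrap_udf:
>>> con.create_function(my_udf, name='my_func', database='udf_db') # doctest: +SKIP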
"""
if name is None:
name = func.name
database = database or self.current_database
if isinstance(func, udf.ImpalaUDF):
stmt = ddl.CreateUDF(func, name=name, database=database)
elif isinstance(func, udf.ImpalaUDA):
stmt = ddl.CreateUDA(func, name=name, database=database)
else:
raise TypeError(func)
self._execute(stmt)
def drop_udf(
self,
name,
input_types=None,
database=None,
force=False,
aggregate=False,
):
"""
Drops a UDF
If only name is given, this will search
for the relevant UDF and drop it.
To delete an overloaded UDF, give only a name and force=True
Parameters
----------
name : string
input_types : list of strings (optional)
force : boolean, default False
Must be set to True to drop overloaded UDFs
database : string, default None
aggregate : boolean, default False
"""
if not input_types:
if not database:
database = self.current_database
result = self.list_udfs(database=database, like=name)
if len(result) > 1:
if force:
for func in result:
self._drop_single_function(
func.name,
func.inputs,
database=database,
aggregate=aggregate,
)
return
else:
raise Exception(
"More than one function "
+ "with {0} found.".format(name)
+ "Please specify force=True"
)
elif len(result) == 1:
func = result.pop()
self._drop_single_function(
func.name,
func.inputs,
database=database,
aggregate=aggregate,
)
return
else:
raise Exception("No function found with name {0}".format(name))
self._drop_single_function(
name, input_types, database=database, aggregate=aggregate
)
def drop_uda(self, name, input_types=None, database=None, force=False):
"""
Drop aggregate function. See drop_udf for more information on the
parameters.
"""
return self.drop_udf(
name, input_types=input_types, database=database, force=force
)
def _drop_single_function(
self, name, input_types, database=None, aggregate=False
):
stmt = ddl.DropFunction(
name,
input_types,
must_exist=False,
aggregate=aggregate,
database=database,
)
self._execute(stmt)
def _drop_all_functions(self, database):
udfs = self.list_udfs(database=database)
for fnct in udfs:
stmt = ddl.DropFunction(
fnct.name,
fnct.inputs,
must_exist=False,
aggregate=False,
database=database,
)
self._execute(stmt)
udafs = self.list_udas(database=database)
for udaf in udafs:
stmt = ddl.DropFunction(
udaf.name,
udaf.inputs,
must_exist=False,
aggregate=True,
database=database,
)
self._execute(stmt)
def list_udfs(self, database=None, like=None):
"""
Lists all UDFs associated with a given database
Parameters
----------
database : string
like : string for searching (optional)
"""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=False)
with self._execute(statement, results=True) as cur:
result = self._get_udfs(cur, udf.ImpalaUDF)
return result
def list_udas(self, database=None, like=None):
"""
Lists all UDAFs associated with a given database
Parameters
----------
database : string
like : string for searching (optional)
"""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=True)
with self._execute(statement, results=True) as cur:
result = self._get_udfs(cur, udf.ImpalaUDA)
return result
def _get_udfs(self, cur, klass):
def _to_type(x):
ibis_type = udf._impala_type_to_ibis(x.lower())
return dt.dtype(ibis_type)
tuples = cur.fetchall()
if len(tuples) > 0:
result = []
for tup in tuples:
out_type, sig = tup[:2]
name, types = _split_signature(sig)
types = _type_parser(types).types
inputs = []
for arg in types:
argm = _arg_type.match(arg)
var, simple = argm.groups()
if simple:
t = _to_type(simple)
inputs.append(t)
else:
t = _to_type(var)
inputs = rlz.listof(t)
# TODO
# inputs.append(varargs(t))
break
output = udf._impala_type_to_ibis(out_type.lower())
result.append(klass(inputs, output, name=name))
return result
else:
return []
def exists_udf(self, name, database=None):
"""
Checks if a given UDF exists within a specified database
Parameters
----------
name : string, UDF name
database : string, database name
Returns
-------
if_exists : boolean
"""
return len(self.list_udfs(database=database, like=name)) > 0
def exists_uda(self, name, database=None):
"""
Checks if a given UDAF exists within a specified database
Parameters
----------
name : string, UDAF name
database : string, database name
Returns
-------
if_exists : boolean
"""
return len(self.list_udas(database=database, like=name)) > 0
def compute_stats(self, name, database=None, incremental=False):
"""
Issue COMPUTE STATS command for a given table
Parameters
----------
name : string
Can be fully qualified (with database name)
database : string, optional
incremental : boolean, default False
If True, issue COMPUTE INCREMENTAL STATS
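Examples
--------
A minimal sketch, assuming an existing client ``con``:
>>> con.compute_stats('my_table', incremental=True) # doctest: +SKIP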
"""
maybe_inc = 'INCREMENTAL ' if incremental else ''
cmd = 'COMPUTE {0}STATS'.format(maybe_inc)
stmt = self._table_command(cmd, name, database=database)
self._execute(stmt)
def invalidate_metadata(self, name=None, database=None):
"""
Issue INVALIDATE METADATA command, optionally only applying to a
particular table. See Impala documentation.
Parameters
----------
name : string, optional
Table name. Can be fully qualified (with database)
database : string, optional
"""
stmt = 'INVALIDATE METADATA'
if name is not None:
stmt = self._table_command(stmt, name, database=database)
self._execute(stmt)
def refresh(self, name, database=None):
"""
Reload HDFS block location metadata for a table, for example after
ingesting data as part of an ETL pipeline. Related to INVALIDATE
METADATA. See Impala documentation for more.
Parameters
----------
name : string
Table name. Can be fully qualified (with database)
database : string, optional
"""
# TODO(wesm): can this statement be cancelled?
stmt = self._table_command('REFRESH', name, database=database)
self._execute(stmt)
def describe_formatted(self, name, database=None):
"""
Retrieve results of DESCRIBE FORMATTED command. See Impala
documentation for more.
Parameters
----------
name : string
Table name. Can be fully qualified (with database)
database : string, optional
"""
from ibis.impala.metadata import parse_metadata
stmt = self._table_command(
'DESCRIBE FORMATTED', name, database=database
)
query = ImpalaQuery(self, stmt)
result = query.execute()
# Leave formatting to pandas
for c in result.columns:
result[c] = result[c].str.strip()
return parse_metadata(result)
def show_files(self, name, database=None):
"""
Retrieve results of SHOW FILES command for a table. See Impala
documentation for more.
Parameters
----------
name : string
Table name. Can be fully qualified (with database)
database : string, optional
"""
stmt = self._table_command('SHOW FILES IN', name, database=database)
return self._exec_statement(stmt)
def list_partitions(self, name, database=None):
stmt = self._table_command('SHOW PARTITIONS', name, database=database)
return self._exec_statement(stmt)
def table_stats(self, name, database=None):
"""
Return results of SHOW TABLE STATS for indicated table. See also
ImpalaTable.stats
"""
stmt = self._table_command('SHOW TABLE STATS', name, database=database)
return self._exec_statement(stmt)
def column_stats(self, name, database=None):
"""
Return results of SHOW COLUMN STATS for indicated table. See also
ImpalaTable.column_stats
"""
stmt = self._table_command(
'SHOW COLUMN STATS', name, database=database
)
return self._exec_statement(stmt)
def _exec_statement(self, stmt, adapter=None):
query = ImpalaQuery(self, stmt)
result = query.execute()
if adapter is not None:
result = adapter(result)
return result
def _table_command(self, cmd, name, database=None):
qualified_name = self._fully_qualified_name(name, database)
return '{0} {1}'.format(cmd, qualified_name)
def _adapt_types(self, descr):
names = []
adapted_types = []
for col in descr:
names.append(col[0])
impala_typename = col[1]
typename = udf._impala_to_ibis_type[impala_typename.lower()]
if typename == 'decimal':
precision, scale = col[4:6]
adapted_types.append(dt.Decimal(precision, scale))
else:
adapted_types.append(typename)
return names, adapted_types
def write_dataframe(self, df, path, format='csv'):
"""
Write a pandas DataFrame to indicated file path (default: HDFS) in the
indicated format
Parameters
----------
df : DataFrame
path : string
Absolute output path
format : {'csv'}, default 'csv'
Returns
-------
None (for now)
"""
from ibis.impala.pandas_interop import DataFrameWriter
writer = DataFrameWriter(self, df)
return writer.write_csv(path)
# ----------------------------------------------------------------------
# ORM-ish usability layer
class ScalarFunction(DatabaseEntity):
def drop(self):
pass
class AggregateFunction(DatabaseEntity):
def drop(self):
pass
class ImpalaTemporaryTable(ops.DatabaseTable):
def __del__(self):
try:
self.drop()
except com.IbisError:
pass
def drop(self):
try:
self.source.drop_table(self.name)
except ImpylaError:
# database might have been dropped
pass
def _validate_compatible(from_schema, to_schema):
if set(from_schema.names) != set(to_schema.names):
raise com.IbisInputError('Schemas have different names')
for name in from_schema:
lt = from_schema[name]
rt = to_schema[name]
if not lt.castable(rt):
raise com.IbisInputError(
'Cannot safely cast {0!r} to {1!r}'.format(lt, rt)
)
def _split_signature(x):
name, rest = x.split('(', 1)
return name, rest[:-1]
_arg_type = re.compile(r'(.*)\.\.\.|([^\.]*)')
class _type_parser:
NORMAL, IN_PAREN = 0, 1
def __init__(self, value):
self.value = value
self.state = self.NORMAL
self.buf = io.StringIO()
self.types = []
for c in value:
self._step(c)
self._push()
def _push(self):
val = self.buf.getvalue().strip()
if val:
self.types.append(val)
self.buf = io.StringIO()
def _step(self, c):
if self.state == self.NORMAL:
if c == '(':
self.state = self.IN_PAREN
elif c == ',':
self._push()
return
elif self.state == self.IN_PAREN:
if c == ')':
self.state = self.NORMAL
self.buf.write(c)
| apache-2.0 |
Silmathoron/nest-simulator | pynest/examples/brette_gerstner_fig_3d.py | 12 | 3030 | # -*- coding: utf-8 -*-
#
# brette_gerstner_fig_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Testing the adapting exponential integrate and fire model in NEST (Brette and Gerstner Fig 3D)
----------------------------------------------------------------------------------------------------
This example tests the adaptive integrate and fire model (AdEx) according to
Brette and Gerstner [1]_ and reproduces Figure 3D of the paper.
Note that Brette and Gerstner give the value for `b` in `nA`.
To be consistent with the other parameters in the equations, `b` must be
converted to `pA` (pico Ampere).
References
~~~~~~~~~~~
.. [1] Brette R and Gerstner W (2005). Adaptive exponential integrate-and-fire model as an effective
description of neuronal activity J. Neurophysiology. https://doi.org/10.1152/jn.00686.2005
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_exp")
###############################################################################
# Set the parameters of the neuron according to the paper.
neuron.set(V_peak=20., E_L=-60.0, a=80.0, b=80.5, tau_w=720.0)
###############################################################################
# Create and configure the stimulus which is a step current.
dc = nest.Create("dc_generator")
dc.set(amplitude=-800.0, start=0.0, stop=400.0)
###############################################################################
# We connect the DC generator.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a ``voltmeter`` to sample the membrane potentials from the neuron
# in intervals of 0.1 ms.
voltmeter = nest.Create("voltmeter", params={'interval': 0.1})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.axis([0, 1000, -85, 0])
nest.voltage_trace.show()
| gpl-2.0 |
iulian787/spack | var/spack/repos/builtin/packages/bart/package.py | 2 | 3092 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bart(MakefilePackage, CudaPackage):
"""BART: Toolbox for Computational Magnetic Resonance Imaging"""
homepage = "https://mrirecon.github.io/bart/"
url = "https://github.com/mrirecon/bart/archive/v0.5.00.tar.gz"
version('0.6.00', sha256='dbbd33d1e3ed3324fe21f90a3b62cb51765fe369f21df100b46a32004928f18d')
version('0.5.00', sha256='30eedcda0f0ef3808157542e0d67df5be49ee41e4f41487af5c850632788f643')
# patch to fix build with MKL
patch('https://github.com/mrirecon/bart/commit/b62ca4972d5ac41a44217a5c27123c15daae74db.patch',
sha256='8fd1be181da928448da750b32d45ee6dce7ba6af0424617c4f8d653cf3f05445',
when='@0.5.00')
# patch to fix Makefile for openblas and cuda
patch('Makefile.patch')
# patch to set path to bart
patch('bart_path-0.5.00.patch', when='@0.5.00')
patch('bart_path-0.6.00.patch', when='@0.6.00')
depends_on('libpng')
depends_on('fftw')
depends_on('blas')
depends_on('lapack')
depends_on('py-numpy', type='run')
depends_on('py-matplotlib', type='run')
extends('python')
conflicts('^atlas', msg='BART does not currently support atlas')
def edit(self, spec, prefix):
env['PREFIX'] = prefix
env['FFTW_BASE'] = spec['fftw'].prefix
if spec['blas'].name == 'openblas':
env['OPENBLAS'] = '1'
if spec['blas'].name in ['intel-mkl', 'intel-parallel-studio']:
env['MKL'] = '1'
env['MKL_BASE'] = env['MKLROOT']
else:
env['BLAS_BASE'] = spec['blas'].prefix
if '^netlib-lapack+lapacke' not in spec:
env['NOLAPACKE'] = '1'
if '+cuda' in spec:
cuda_arch = self.spec.variants['cuda_arch'].value
env['CUDA'] = '1'
env['CUDA_BASE'] = spec['cuda'].prefix
env['GPUARCH_FLAGS'] = ' '.join(self.cuda_flags(cuda_arch))
def install(self, spec, prefix):
python_dir = join_path(prefix,
spec['python'].package.site_packages_dir)
make('install')
install_tree('scripts', prefix.scripts)
install_tree('matlab', prefix.matlab)
install('startup.m', prefix)
install('python/bart.py', python_dir)
install('python/cfl.py', python_dir)
install('python/wslsupport.py', python_dir)
if '^python@3:' in spec:
install('python/bartview3.py', join_path(prefix.bin, 'bartview'))
filter_file(r'#!/usr/bin/python3', '#!/usr/bin/env python',
prefix.bin.bartview)
else:
install('python/bartview.py', join_path(prefix.bin, 'bartview'))
filter_file(r'#!/usr/bin/python', '#!/usr/bin/env python',
prefix.bin.bartview)
def setup_run_environment(self, env):
env.set('TOOLBOX_PATH', self.prefix)
| lgpl-2.1 |
Locottus/Python | polygonsKML.py | 1 | 2599 | import pandas as pd
import glob
import os
import datetime
import random
import decimal
#https://developers.google.com/kml/documentation/kml_tut
#global vars
path = "."
all_files = glob.glob(os.path.join(path, "*.csv")) #make list of files in path
now = datetime.datetime.now() #get current date for output file
fname = 'Polygon' + str(now.day) + str(now.month) + str(now.year) + '.kml'
f = open(fname , 'w')#open output file with current date
xmlHeader = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">
<Document>\n"""
xmlFooter = '\n</Document>\n</kml>\n'
def pointConcat(df):
for index, row in df.iterrows():
line = str(row[0]) + ',' + str(row[1]) + ',' + str(row[2]) + '.'
f.write(line + '\n')
print(line)
def polygonCreation(df,p):
line = """\n<Placemark>
<name>""" + p + """</name>
<Polygon>
<tessellate>1</tessellate>
<extrude>1</extrude>
<gx:altitudeMode>relativeToGround</gx:altitudeMode>
<outerBoundaryIs>
<LinearRing>
<coordinates>\n"""
f.write(line)
strude = str(decimal.Decimal(random.randrange(250, 400))/100)#random strude from 2.50 to 4.00
for index, row in df.iterrows():
if ((str(row[6]) == p )):
line = str(row[0]) + ',' + str(row[1]) + ',' + strude
f.write(line + '\n')
print(line)
line = """ </coordinates>
</LinearRing>
</outerBoundaryIs>
</Polygon>
</Placemark>
\n"""
f.write(line)
def isPolygon(df,p):
for index, row in df.iterrows():
if ((str(row[6]) == str(p))):
return row[7];
return False
#main()
f.write(xmlHeader + '\n')#write header
for file in all_files:
# Getting the file name without extension
file_name = os.path.splitext(os.path.basename(file))[0]
# Reading the file content to create a DataFrame
dataFile = pd.read_csv(file, header=None)
df = pd.DataFrame(dataFile)
polygons = df[6].unique()
for p in polygons:
if (isPolygon(df,p)):
print('******' + str(p))
polygonCreation(df,str(p))
#pointConcat(df)
f.write(xmlFooter+ '\n')#write footer
f.close()
print('end of line')
#my output file contains Longitude,Latitude,Altitude in a csv file.
#-90.5298628393332,14.608550970260422,1524.911447514169,2.1963847,1519558183000,8.0,comment,TRUE,num_poly
| mit |
IBMStreams/streamsx.topology | com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/context.py | 1 | 79645 | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015,2020
"""
Context for submission and build of topologies.
"""
__all__ = ['ContextTypes', 'ConfigParams', 'JobConfig', 'SubmissionResult', 'submit', 'build', 'run']
import logging
import os
import os.path
import shutil
import json
import platform
import subprocess
import threading
import sys
import codecs
import tempfile
import copy
import time
import warnings
import streamsx.rest
import streamsx.rest_primitives
import streamsx._streams._version
import urllib.parse as up
__version__ = streamsx._streams._version.__version__
logger = logging.getLogger(__name__)
#
# Submission of a python graph using the Java Application API
# The JAA is reused to have a single set of code_createJSONFile that creates
# SPL, the toolkit, the bundle and submits it to the relevant
# environment
#
def submit(ctxtype, graph, config=None, username=None, password=None):
"""
Submits a `Topology` (application) using the specified context type.
Used to submit an application for compilation into a Streams application and
execution within an Streaming Analytics service or IBM Streams instance.
`ctxtype` defines how the application will be submitted, see :py:class:`ContextTypes`.
The parameters `username` and `password` are only required when submitting to an
IBM Streams instance and it is required to access the Streams REST API from the
code performing the submit. Accessing data from views created by
:py:meth:`~streamsx.topology.topology.Stream.view` requires access to the Streams REST API.
Args:
ctxtype(str): Type of context the application will be submitted to. A value from :py:class:`ContextTypes`.
graph(Topology): The application topology to be submitted.
config(dict): Configuration for the submission, augmented with values such as a :py:class:`JobConfig` or keys from :py:class:`ConfigParams`.
username(str): Deprecated: Username for the Streams REST api. Use environment variable ``STREAMS_USERNAME`` if using user-password authentication.
password(str): Deprecated: Password for `username`. Use environment variable ``STREAMS_PASSWORD`` if using user-password authentication.
Returns:
SubmissionResult: Result of the submission. Content depends on :py:class:`ContextTypes`
constant passed as `ctxtype`.
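Example, a minimal sketch assuming ``topo`` is an existing Topology::
    from streamsx.topology.context import submit, ContextTypes
    result = submit(ContextTypes.DISTRIBUTED, topo)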
"""
streamsx._streams._version._mismatch_check(__name__)
graph = graph.graph
if not graph.operators:
raise ValueError("Topology {0} does not contain any streams.".format(graph.topology.name))
if username or password:
warnings.warn("Use environment variables STREAMS_USERNAME and STREAMS_PASSWORD", DeprecationWarning, stacklevel=2)
context_submitter = _SubmitContextFactory(graph, config, username, password).get_submit_context(ctxtype)
sr = SubmissionResult(context_submitter.submit())
sr._submitter = context_submitter
return sr
def build(topology, config=None, dest=None, verify=None):
"""
Build a topology to produce a Streams application bundle.
Builds a topology using :py:func:`submit` with context type :py:const:`~ContextTypes.BUNDLE`. The result is a sab file on the local file system along
with a job config overlay file matching the application.
The build uses a build service or a local install, see :py:const:`~ContextTypes.BUNDLE` for details.
Args:
topology(Topology): Application topology to be built.
config(dict): Configuration for the build.
dest(str): Destination directory for the sab and JCO files. Default is context specific.
verify: SSL verification used by requests when using a build service. Defaults to enabling SSL verification.
Returns:
3-element tuple containing
- **bundle_path** (*str*): path to the bundle (sab file) or ``None`` if not created.
- **jco_path** (*str*): path to file containing the job config overlay for the application or ``None`` if not created.
- **result** (*SubmissionResult*): value returned from ``submit``.
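Example, a minimal sketch assuming ``topo`` is an existing Topology::
    sab_path, jco_path, result = build(topo, dest='.')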
.. seealso:: :py:const:`~ContextTypes.BUNDLE` for details on how to configure the build service to use.
.. versionadded:: 1.14
"""
if verify is not None:
config = config.copy() if config else dict()
config[ConfigParams.SSL_VERIFY] = verify
sr = submit(ContextTypes.BUNDLE, topology, config=config)
if 'bundlePath' in sr:
if dest:
bundle = sr['bundlePath']
bundle_dest = os.path.join(dest, os.path.basename(bundle))
if os.path.exists(bundle_dest): os.remove(bundle_dest)
shutil.move(bundle, dest)
sr['bundlePath'] = bundle_dest
jco = sr['jobConfigPath']
jco_dest = os.path.join(dest, os.path.basename(jco))
if os.path.exists(jco_dest): os.remove(jco_dest)
shutil.move(jco, dest)
sr['jobConfigPath'] = jco_dest
return sr['bundlePath'], sr['jobConfigPath'], sr
return None, None, sr
class _BaseSubmitter(object):
"""
A submitter which handles submit operations common across all submitter types..
"""
def __init__(self, ctxtype, config, graph):
self.ctxtype = ctxtype
self.config = dict()
if config is not None:
# Make copy of config to avoid modifying
# the callers config
self.config.update(config)
# When SERVICE_DEFINITION is a String, it is assumed that
# it is JSON SAS credentials, which must be converted to a JSON object
service_def = self.config.get(ConfigParams.SERVICE_DEFINITION)
if service_def:
if isinstance(service_def, str):
self.config[ConfigParams.SERVICE_DEFINITION] = json.loads(service_def)
self.config['contextType'] = str(self.ctxtype)
if 'originator' not in self.config:
self.config['originator'] = 'topology-' + __version__ + ':python-' + platform.python_version()
self.graph = graph
self.fn = None
self.results_file = None
self.keepArtifacts = False
if 'topology.keepArtifacts' in self.config:
self.keepArtifacts = self.config.get('topology.keepArtifacts')
def _config(self):
"Return the submit configuration"
return self.config
def submit(self):
# Convert the JobConfig into overlays
self._create_job_config_overlays()
# encode the relevant python version information into the config
self._add_python_info()
# Create the json file containing the representation of the application
try:
self._create_json_file(self._create_full_json())
except IOError:
logger.error("Error writing json graph to file.")
raise
try:
return self._submit_exec()
finally:
if not self.keepArtifacts:
_delete_json(self)
def _submit_exec(self):
tk_root = self._get_toolkit_root()
cp = os.path.join(tk_root, "lib", "com.ibm.streamsx.topology.jar")
remote_context = False
streams_install = os.environ.get('STREAMS_INSTALL')
# If there is no streams install, get java from JAVA_HOME and use the remote contexts.
if streams_install is None:
java_home = os.environ.get('JAVA_HOME')
if java_home is None:
raise ValueError("JAVA_HOME not found. Please set the JAVA_HOME system variable")
jvm = os.path.join(java_home, "bin", "java")
remote_context = True
# Otherwise, use the Java version from the streams install
else:
jvm = os.path.join(streams_install, "java", "jre", "bin", "java")
if self.config.get(ConfigParams.FORCE_REMOTE_BUILD):
remote_context = True
cp = cp + ':' + os.path.join(streams_install, "lib", "com.ibm.streams.operator.samples.jar")
progress_fn = lambda _ : None
if remote_context:
submit_class = "com.ibm.streamsx.topology.context.remote.RemoteContextSubmit"
try:
# Verify we are in an IPython env.
get_ipython() # noqa : F821
import ipywidgets as widgets
logger.debug("ipywidgets available - creating IntProgress")
progress_bar = widgets.IntProgress(
value=0,
min=0, max=10, step=1,
description='Initializing',
bar_style='info', orientation='horizontal',
style={'description_width':'initial'})
logger.debug("ipywidgets available - created IntProgress")
try:
display(progress_bar) # noqa : F821
def _show_progress(msg):
if msg is True:
progress_bar.value = progress_bar.max
progress_bar.bar_style = 'success'
return
if msg is False:
progress_bar.bar_style = 'danger'
return
msg = msg.split('-')
progress_bar.value += 1
progress_bar.description = msg[3]
progress_fn = _show_progress
except:
logger.debug("ipywidgets IntProgress error: %s", sys.exc_info()[1])
pass
except:
logger.debug("ipywidgets not available: %s", sys.exc_info()[1])
pass
else:
submit_class = "com.ibm.streamsx.topology.context.local.StreamsContextSubmit"
jul_cfg = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.properties')
jul = '-Djava.util.logging.config.file=' + jul_cfg
args = [jvm, '-classpath', cp, jul, submit_class, self.ctxtype, self.fn, str(logging.getLogger().getEffectiveLevel())]
logger.info("Generating SPL and submitting application.")
proc_env = self._get_java_env()
process = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, env=proc_env)
stderr_thread = threading.Thread(target=_print_process_stderr, args=([process, self, progress_fn]))
stderr_thread.daemon = True
stderr_thread.start()
stdout_thread = threading.Thread(target=_print_process_stdout, args=([process]))
stdout_thread.daemon = True
stdout_thread.start()
process.wait()
results_json = {}
# Only try to read the results file if the submit was successful.
if process.returncode == 0:
with open(self.results_file) as _file:
try:
results_json = json.loads(_file.read())
progress_fn(True)
except IOError:
logger.error("Could not read file:" + str(_file.name))
progress_fn(False)
raise
except json.JSONDecodeError:
logger.error("Could not parse results file:" + str(_file.name))
progress_fn(False)
raise
except:
logger.error("Unknown error while processing results file.")
progress_fn(False)
raise
else:
progress_fn(False)
results_json['return_code'] = process.returncode
self._augment_submission_result(results_json)
self.submission_results = results_json
return results_json
def _augment_submission_result(self, submission_result):
"""Allow a subclass to augment a submission result"""
pass
def _get_java_env(self):
"Get the environment to be passed to the Java execution"
return os.environ.copy()
def _add_python_info(self):
# Python information added to deployment
pi = {}
pi["prefix"] = sys.exec_prefix
pi["version"] = sys.version
pi['major'] = sys.version_info.major
pi['minor'] = sys.version_info.minor
self.config["python"] = pi
def _create_job_config_overlays(self):
if ConfigParams.JOB_CONFIG in self.config:
jco = self.config[ConfigParams.JOB_CONFIG]
del self.config[ConfigParams.JOB_CONFIG]
jco._add_overlays(self.config)
def _create_full_json(self):
fj = dict()
# Removing Streams Connection object because it is not JSON serializable, and not applicable for submission
# Need to re-add it, since the StreamsConnection needs to be returned from the submit.
sc = self.config.pop(ConfigParams.STREAMS_CONNECTION, None)
fj["deploy"] = self.config.copy()
fj["graph"] = self.graph.generateSPLGraph()
_file = tempfile.NamedTemporaryFile(prefix="results", suffix=".json", mode="w+t", delete=False)
_file.close()
fj["submissionResultsFile"] = _file.name
self.results_file = _file.name
logger.debug("Results file created at " + _file.name)
if sc is not None:
self.config[ConfigParams.STREAMS_CONNECTION] = sc
return fj
def _create_json_file(self, fj):
if sys.hexversion < 0x03000000:
tf = tempfile.NamedTemporaryFile(mode="w+t", suffix=".json", prefix="splpytmp", delete=False)
else:
tf = tempfile.NamedTemporaryFile(mode="w+t", suffix=".json", encoding="UTF-8", prefix="splpytmp",
delete=False)
tf.write(json.dumps(fj, sort_keys=True, indent=2, separators=(',', ': ')))
tf.close()
self.fn = tf.name
def _setup_views(self):
# Link each view back to this context.
for view in self.graph._views:
view.stop_data_fetch()
view._submit_context = self
def streams_connection(self):
raise NotImplementedError("Views require submission to DISTRIBUTED or ANALYTICS_SERVICE context")
# There are two modes for execution.
#
# Pypi (Python focused)
# Pypi (pip install) package includes the SPL toolkit as
# streamsx/.toolkit/com.ibm.streamsx.topology
# However the streamsx Python packages have been moved out
# of the toolkit's (opt/python/package) compared
# to the original toolkit layout. They are moved to the
# top level of the pypi package.
#
# SPL Toolkit (SPL focused):
# Streamsx Python packages are executed from opt/python/packages
#
# This function determines the root of the SPL toolkit based
# upon the existence of the '.toolkit' directory.
#
@staticmethod
def _get_toolkit_root():
# Directory of this file (streamsx/topology)
dir = os.path.dirname(os.path.abspath(__file__))
# This is streamsx
dir = os.path.dirname(dir)
# See if .toolkit exists, if so executing from
# a pip install
tk_root = os.path.join(dir, '.toolkit', 'com.ibm.streamsx.topology')
if os.path.isdir(tk_root):
return tk_root
# Else dir is tk/opt/python/packages/streamsx
dir = os.path.dirname(dir)
dir = os.path.dirname(dir)
dir = os.path.dirname(dir)
tk_root = os.path.dirname(dir)
return tk_root
class _StreamingAnalyticsSubmitter(_BaseSubmitter):
"""
A submitter supports the ANALYTICS_SERVICE (Streaming Analytics service) context.
"""
# Maintains the last time by service in ms since epoch the last
# time a thread saw the service running. Allows avoidance of
# status checks when we are somewhat confident the service
# is running, eg. during test runs or repeated submissions.
_SERVICE_ACTIVE = threading.local()
def __init__(self, ctxtype, config, graph):
super(_StreamingAnalyticsSubmitter, self).__init__(ctxtype, config, graph)
self._streams_connection = self._config().get(ConfigParams.STREAMS_CONNECTION)
if ConfigParams.SERVICE_DEFINITION in self._config():
# Convert the service definition to a VCAP services definition.
# Which is then passed through to Java as a VCAP_SERVICES env var
# Service name matching the generated VCAP is passed through config.
service_def = self._config().get(ConfigParams.SERVICE_DEFINITION)
self._vcap_services = _vcap_from_service_definition(service_def)
self._config()[ConfigParams.SERVICE_NAME] = _name_from_service_definition(service_def)
else:
self._vcap_services = self._config().get(ConfigParams.VCAP_SERVICES)
self._service_name = self._config().get(ConfigParams.SERVICE_NAME)
if self._streams_connection is not None:
if not isinstance(self._streams_connection, streamsx.rest.StreamingAnalyticsConnection):
raise ValueError("config must contain a StreamingAnalyticsConnection object when submitting to "
"{} context".format(ctxtype))
# Use credentials stored within StreamingAnalyticsConnection
self._service_name = self._streams_connection.service_name
self._vcap_services = {'streaming-analytics': [
{'name': self._service_name, 'credentials': self._streams_connection.credentials}
]}
self._config()[ConfigParams.SERVICE_NAME] = self._service_name
# TODO: Compare credentials between the config and StreamsConnection, verify they are the same
# Clear the VCAP_SERVICES key in config, since env var will contain the content
self._config().pop(ConfigParams.VCAP_SERVICES, None)
self._config().pop(ConfigParams.SERVICE_DEFINITION, None)
self._setup_views()
self._job = None
def _create_full_json(self):
fj = super(_StreamingAnalyticsSubmitter, self)._create_full_json()
if hasattr(_StreamingAnalyticsSubmitter._SERVICE_ACTIVE, 'running'):
rts = _StreamingAnalyticsSubmitter._SERVICE_ACTIVE.running
if self._service_name in rts:
sn = self._service_name if self._service_name else os.environ['STREAMING_ANALYTICS_SERVICE_NAME']
fj['deploy']['serviceRunningTime'] = rts[sn]
return fj
def _job_access(self):
if self._job:
return self._job
if self._streams_connection is None:
self._streams_connection = streamsx.rest.StreamingAnalyticsConnection(self._vcap_services, self._service_name)
self._job = self._streams_connection.get_instances()[0].get_job(
id=self.submission_results['jobId'])
return self._job
def _augment_submission_result(self, submission_result):
vcap = streamsx.rest._get_vcap_services(self._vcap_services)
credentials = streamsx.rest._get_credentials(vcap, self._service_name)
if streamsx.rest_primitives._IAMConstants.V2_REST_URL in credentials:
instance_id = credentials[streamsx.rest_primitives._IAMConstants.V2_REST_URL].split('streaming_analytics/', 1)[1]
else:
instance_id = credentials['jobs_path'].split('/service_instances/', 1)[1].split('/', 1)[0]
submission_result['instanceId'] = instance_id
if 'jobId' in submission_result:
if not hasattr(_StreamingAnalyticsSubmitter._SERVICE_ACTIVE, 'running'):
_StreamingAnalyticsSubmitter._SERVICE_ACTIVE.running = dict()
sn = self._service_name if self._service_name else os.environ['STREAMING_ANALYTICS_SERVICE_NAME']
_StreamingAnalyticsSubmitter._SERVICE_ACTIVE.running[sn] = int(time.time() * 1000.0)
def _get_java_env(self):
"Pass the VCAP through the environment to the java submission"
env = super(_StreamingAnalyticsSubmitter, self)._get_java_env()
vcap = streamsx.rest._get_vcap_services(self._vcap_services)
env['VCAP_SERVICES'] = json.dumps(vcap)
return env
class _BundleSubmitter(_BaseSubmitter):
"""
A submitter which supports the BUNDLE context
including remote build.
"""
def __init__(self, ctxtype, config, graph):
_BaseSubmitter.__init__(self, ctxtype, config, graph)
self._remote = config.get(ConfigParams.FORCE_REMOTE_BUILD)
if not self._remote and 'STREAMS_INSTALL' in os.environ:
return
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self._streams_connection is not None:
pass
else:
# Look for a service definition
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if not svc_info:
# Look for endpoint set by env vars.
inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if inst is not None:
self._streams_connection = inst.rest_client._sc
if isinstance(self._streams_connection, streamsx.rest.StreamsConnection):
if isinstance(self._streams_connection.session.auth, streamsx.rest_primitives._ICPDExternalAuthHandler):
svc_info = self._streams_connection.session.auth._cfg
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
if self._streams_connection.session.verify == False:
self._config()[ConfigParams.SSL_VERIFY] = False
else:
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if svc_info:
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
streamsx.rest_primitives.Instance._clear_service_info(self._config())
def _get_java_env(self):
"Set env vars from connection if set"
env = super(_BundleSubmitter, self)._get_java_env()
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
if self._remote:
env.pop('STREAMS_INSTALL', None)
return env
class _EdgeSubmitter(_BaseSubmitter):
"""
A submitter which supports the EDGE context (force remote build).
"""
def __init__(self, ctxtype, config, graph):
_BaseSubmitter.__init__(self, ctxtype, config, graph)
config[ConfigParams.FORCE_REMOTE_BUILD] = True # EDGE is always remote build
self._remote = config.get(ConfigParams.FORCE_REMOTE_BUILD)
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self._streams_connection is not None:
pass
else:
# Look for a service definition
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if not svc_info:
# Look for endpoint set by env vars.
inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if inst is not None:
self._streams_connection = inst.rest_client._sc
if isinstance(self._streams_connection, streamsx.rest.StreamsConnection):
if isinstance(self._streams_connection.session.auth, streamsx.rest_primitives._ICPDExternalAuthHandler):
svc_info = self._streams_connection.session.auth._cfg
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
if self._streams_connection.session.verify == False:
self._config()[ConfigParams.SSL_VERIFY] = False
else:
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if svc_info:
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
streamsx.rest_primitives.Instance._clear_service_info(self._config())
# check that serviceBuildPoolsEndpoint is set
try:
serviceBuildPoolsEndpoint = self._config()[ConfigParams.SERVICE_DEFINITION]['connection_info']['serviceBuildPoolsEndpoint']
except KeyError:
raise RuntimeError('Build service is not configured for EDGE submission')
def _get_java_env(self):
"Set env vars from connection if set"
env = super(_EdgeSubmitter, self)._get_java_env()
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
if self._remote:
env.pop('STREAMS_INSTALL', None)
return env
def _get_distributed_submitter(config, graph, username, password):
# CP4D integrated environment and within project
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if svc_info:
return _DistributedSubmitterCP4DIntegratedProject(config, graph, svc_info)
# CP4D integrated environment external to project
if 'CP4D_URL' in os.environ and \
'STREAMS_INSTANCE_ID' in os.environ and \
'STREAMS_PASSWORD' in os.environ:
return _DistributedSubmitterCP4DIntegrated(config, graph)
# CP4D standalone environment
if 'STREAMS_REST_URL' in os.environ and \
'STREAMS_PASSWORD' in os.environ:
return _DistributedSubmitterCP4DStandalone(config, graph)
# Streams 4.2/4.3 by connection
if 'STREAMS_INSTALL' in os.environ and \
'STREAMS_INSTANCE_ID' in os.environ and \
ConfigParams.STREAMS_CONNECTION in config and \
isinstance(config[ConfigParams.STREAMS_CONNECTION], streamsx.rest.StreamsConnection):
return _DistributedSubmitter4Conn(config, graph, username, password)
# Streams 4.2/4.3 by environment
if 'STREAMS_INSTALL' in os.environ and \
'STREAMS_DOMAIN_ID' in os.environ and \
'STREAMS_INSTANCE_ID' in os.environ:
return _DistributedSubmitter4(config, graph, username, password)
raise RuntimeError('Insufficient configuration for DISTRIBUTED submission')
class _DistributedSubmitter(_BaseSubmitter):
"""
A submitter which supports the DISTRIBUTED context.
Sub-classed for specific configurations
"""
def __init__(self, config, graph, username, password):
super(_DistributedSubmitter, self).__init__(ContextTypes.DISTRIBUTED, config, graph)
self._streams_connection = None
self.username = username
self.password = password
self._job = None
# Give each view in the app the necessary information to connect to SWS.
self._setup_views()
def _job_access(self):
if self._job:
return self._job
instance = self._get_instance()
self._job = instance.get_job(id=self.submission_results['jobId'])
return self._job
class _DistributedSubmitterCP4DIntegratedProject(_DistributedSubmitter):
"""
A submitter which supports the CPD integrated configuration
within a project.
"""
def __init__(self, config, graph, svc_info):
super(_DistributedSubmitterCP4DIntegratedProject, self).__init__(config, graph, None, None)
# use the config here rather than svc_info as the config contains SSL_VERIFY
streams_instance = streamsx.rest_primitives.Instance.of_service(config)
if hasattr(streams_instance, 'productVersion'):
svc_info['productVersion'] = streams_instance.productVersion
# when we use the REST-API of the CP4D from inside the CP4D (Notebook in a project)
# we go over this URL: https://internal-nginx-svc:12443
svc_info['cluster_ip'] = 'internal-nginx-svc'
svc_info['cluster_port'] = 12443
# user-provided cp4d URL to override the hard-coded from above
if ConfigParams.CP4D_URL in config:
userUrl = config[ConfigParams.CP4D_URL]
if userUrl:
es = up.urlparse(userUrl)
if ':' in es.netloc:
cluster_ip = es.netloc.split(':')[0]
cluster_port = es.netloc.split(':')[1]
else:
cluster_ip = es.netloc
cluster_port = 443
svc_info['cluster_ip_orig'] = svc_info['cluster_ip']
svc_info['cluster_port_orig'] = svc_info['cluster_port']
svc_info['cluster_ip'] = cluster_ip
svc_info['cluster_port'] = cluster_port
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
self._config()[ConfigParams.FORCE_REMOTE_BUILD] = True
streamsx.rest_primitives.Instance._clear_service_info(self._config())
def _get_instance(self):
return streamsx.rest_primitives.Instance.of_service(self._config())
def _get_java_env(self):
env = super(_DistributedSubmitterCP4DIntegratedProject, self)._get_java_env()
env.pop('CP4D_URL', None)
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
class _DistributedSubmitterCP4DIntegrated(_DistributedSubmitter):
"""
A submitter which supports the CPD integrated configuration
outside a project.
"""
def __init__(self, config, graph):
super(_DistributedSubmitterCP4DIntegrated, self).__init__(config, graph, None, None)
# Look for endpoint set by env vars.
self._inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if self._inst is None:
raise ValueError("Incorrect configuration for Cloud Pak for Data integrated configuration")
self._streams_connection = self._inst.rest_client._sc
svc_info = self._streams_connection.session.auth._cfg
if hasattr(self._inst, 'productVersion'):
svc_info['productVersion'] = self._inst.productVersion
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
self._config()[ConfigParams.FORCE_REMOTE_BUILD] = True
def _get_instance(self):
return self._inst
def _get_java_env(self):
env = super(_DistributedSubmitterCP4DIntegrated, self)._get_java_env()
env.pop('CP4D_URL', None)
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
class _DistributedSubmitterCP4DStandalone(_DistributedSubmitter):
"""
A submitter which supports the CPD standalone configuration.
"""
def __init__(self, config, graph):
super(_DistributedSubmitterCP4DStandalone, self).__init__(config, graph, None, None)
# Look for endpoint set by env vars.
self._inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if self._inst is None:
raise ValueError("Incorrect configuration for Cloud Pak for Data standalone configuration")
self._streams_connection = self._inst.rest_client._sc
self._config()[ConfigParams.FORCE_REMOTE_BUILD] = True
def _get_instance(self):
return self._inst
def _get_java_env(self):
env = super(_DistributedSubmitterCP4DStandalone, self)._get_java_env()
env.pop('CP4D_URL', None)
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
class _DistributedSubmitter4(_DistributedSubmitter):
"""
A submitter which supports the DISTRIBUTED context
for IBM Streams 4.2/4.3.
"""
def __init__(self, config, graph, username, password):
super(_DistributedSubmitter4, self).__init__(config, graph, username, password)
def _get_instance(self):
if not self._streams_connection:
self._streams_connection = streamsx.rest.StreamsConnection(self.username, self.password)
if ConfigParams.SSL_VERIFY in self._config():
self._streams_connection.session.verify = self._config()[ConfigParams.SSL_VERIFY]
return self._streams_connection.get_instance(os.environ['STREAMS_INSTANCE_ID'])
class _DistributedSubmitter4Conn(_DistributedSubmitter4):
"""
A submitter which supports the DISTRIBUTED context
for IBM Streams 4.2/4.3 using a connection.
"""
def __init__(self, config, graph, username, password):
super(_DistributedSubmitter4Conn, self).__init__(config, graph, username, password)
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
self.username = self._streams_connection.session.auth[0]
self.password = self._streams_connection.session.auth[1]
if (username is not None and username != self.username) or (password is not None and password != self.password):
raise RuntimeError('Credentials supplied in the arguments differ than '
'those specified in the StreamsConnection object')
def _get_instance(self):
iid = os.environ.get('STREAMS_INSTANCE_ID')
return self._streams_connection.get_instance(id=iid)
def _get_java_env(self):
env = super(_DistributedSubmitter4Conn, self)._get_java_env()
# Need to make sure the environment matches the connection.
sc = self._streams_connection
env['STREAMS_DOMAIN_ID'] = sc.get_domains()[0].id
return env
class _SubmitContextFactory(object):
"""
ContextSubmitter:
Responsible for performing the correct submission depending on a number of factors, including: the
presence/absence of a streams install, the type of context, and whether the user seeks to retrieve data via rest
"""
def __init__(self, graph, config=None, username=None, password=None):
self.graph = graph
self.config = config
self.username = username
self.password = password
if self.config is None:
self.config = {}
def get_submit_context(self, ctxtype):
# If there is no streams install present, currently only ANALYTICS_SERVICE, TOOLKIT, and BUILD_ARCHIVE
# are supported.
streams_install = os.environ.get('STREAMS_INSTALL')
if streams_install is None:
if ctxtype == ContextTypes.STANDALONE:
raise ValueError(ctxtype + " must be submitted when an IBM Streams install is present.")
if ctxtype == ContextTypes.DISTRIBUTED:
logger.debug("Selecting the DISTRIBUTED context for submission")
return _get_distributed_submitter(self.config, self.graph, self.username, self.password)
elif ctxtype == ContextTypes.STREAMING_ANALYTICS_SERVICE:
logger.debug("Selecting the STREAMING_ANALYTICS_SERVICE context for submission")
ctxtype = ContextTypes.STREAMING_ANALYTICS_SERVICE
return _StreamingAnalyticsSubmitter(ctxtype, self.config, self.graph)
elif ctxtype == 'BUNDLE':
logger.debug("Selecting the BUNDLE context for submission")
if 'CP4D_URL' in os.environ:
return _BundleSubmitter(ctxtype, self.config, self.graph)
if 'VCAP_SERVICES' in os.environ or \
ConfigParams.VCAP_SERVICES in self.config or \
ConfigParams.SERVICE_DEFINITION in self.config:
sbs = _SasBundleSubmitter(self.config, self.graph)
if sbs._remote:
return sbs
return _BundleSubmitter(ctxtype, self.config, self.graph)
elif ctxtype == 'EDGE':
logger.debug("Selecting the EDGE context for submission")
return _EdgeSubmitter(ctxtype, self.config, self.graph)
elif ctxtype == 'EDGE_BUNDLE':
logger.debug("Selecting the EDGE_BUNDLE context for submission")
return _EdgeSubmitter(ctxtype, self.config, self.graph)
else:
logger.debug("Using the BaseSubmitter, and passing the context type through to java.")
return _BaseSubmitter(ctxtype, self.config, self.graph)
# Used to delete the JSON file after it is no longer needed.
def _delete_json(submitter):
for fn in [submitter.fn, submitter.results_file]:
if fn and os.path.isfile(fn):
os.remove(fn)
# Used by a thread which polls a subprocess's stdout and writes it to stdout
def _print_process_stdout(process):
try:
while True:
line = process.stdout.readline()
if len(line) == 0:
process.stdout.close()
break
line = line.decode("utf-8").strip()
print(line)
except:
logger.error("Error reading from Java subprocess stdout stream.")
raise
finally:
process.stdout.close()
_JAVA_LOG_LVL = {
# java.util.logging
'SEVERE': logging.ERROR,
'WARNING': logging.WARNING,
'INFO':logging.INFO, 'CONFIG':logging.INFO,
    'FINE':logging.DEBUG, 'FINER':logging.DEBUG, 'FINEST':logging.DEBUG,
'FATAL': logging.CRITICAL,
'ERROR': logging.ERROR,
    'DEBUG':logging.DEBUG, 'TRACE':logging.DEBUG
}
# Used by a thread which polls a subprocess's stderr and writes it to
# a logger or stderr
def _print_process_stderr(process, submitter, progress_fn):
try:
while True:
line = process.stderr.readline()
if len(line) == 0:
process.stderr.close()
break
line = line.decode("utf-8").strip()
em = line.rstrip().split(': ', 1)
if len(em) == 2 and em[0] in _JAVA_LOG_LVL:
if 'INFO' == em[0] and em[1].startswith('!!-streamsx-'):
progress_fn(em[1])
continue
logger.log(_JAVA_LOG_LVL[em[0]], em[1])
continue
print(line, file=sys.stderr)
except:
logger.error("Error reading from Java subprocess stderr stream.")
raise
finally:
process.stderr.close()
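# Illustrative sketch (assumed sample line, not called by the submitters): shows how a
# Java stderr line such as "WARNING: build started" is split on ': ' and routed to the
# matching Python logging level through _JAVA_LOG_LVL.
def _example_map_java_log_line(line='WARNING: build started'):
    em = line.rstrip().split(': ', 1)
    if len(em) == 2 and em[0] in _JAVA_LOG_LVL:
        # -> (logging.WARNING, 'build started')
        return _JAVA_LOG_LVL[em[0]], em[1]
    return None, line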
class ContextTypes(object):
"""
Submission context types.
A :py:class:`~streamsx.topology.topology.Topology` is submitted using :py:func:`submit` and a context type.
    Submission of a `Topology` generally builds the application into a Streams application
bundle (sab) file and then submits it for execution in the required context.
The Streams application bundle contains all the artifacts required by an application such
that it can be executed remotely (e.g. on a Streaming Analytics service), including
distributing the execution of the application across multiple resources (hosts).
The context type defines which context is used for submission.
The main context types result in a running application and are:
* :py:const:`STREAMING_ANALYTICS_SERVICE` - Application is submitted to a Streaming Analytics service running on IBM Cloud.
* :py:const:`DISTRIBUTED` - Application is submitted to an IBM Streams instance.
* :py:const:`STANDALONE` - Application is executed as a local process, IBM Streams `standalone` application. Typically this is used during development or testing.
The :py:const:`BUNDLE` context type compiles the application (`Topology`) to produce a
Streams application bundle (sab file). The bundle is not executed but may subsequently be submitted
to a Streaming Analytics service or an IBM Streams instance. A bundle may be submitted multiple
times to services or instances, each resulting in a unique job (running application).
"""
STREAMING_ANALYTICS_SERVICE = 'STREAMING_ANALYTICS_SERVICE'
"""Submission to Streaming Analytics service running on IBM Cloud.
The `Topology` is compiled and the resultant Streams application bundle
(sab file) is submitted for execution on the Streaming Analytics service.
When **STREAMS_INSTALL** is not set or the :py:func:`submit` `config` parameter has
:py:const:`~ConfigParams.FORCE_REMOTE_BUILD` set to `True` the compilation of the application
occurs remotely by the service. This allows creation and submission of Streams applications
without a local install of IBM Streams.
When **STREAMS_INSTALL** is set and the :py:func:`submit` `config` parameter has
:py:const:`~ConfigParams.FORCE_REMOTE_BUILD` set to `False` or not set then the creation of the
Streams application bundle occurs locally and the bundle is submitted for execution on the service.
Environment variables:
These environment variables define how the application is built and submitted.
    * **STREAMS_INSTALL** - (optional) Location of an IBM Streams installation (4.0.1 or later). The install must be running on RedHat/CentOS 7 and `x86_64` architecture.
"""
DISTRIBUTED = 'DISTRIBUTED'
"""Submission to an IBM Streams instance.
    .. rubric:: IBM Cloud Pak for Data integrated configuration
*Projects (within cluster)*
The `Topology` is compiled using the Streams build service and submitted
    to a Streams service instance running in the same Cloud Pak for
Data cluster as the Jupyter notebook or script declaring the application.
The instance is specified in the configuration passed into :py:func:`submit`. The code that selects a service instance by name is::
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
submit(ContextTypes.DISTRIBUTED, topo, cfg)
The resultant `cfg` dict may be augmented with other values such as
a :py:class:`JobConfig` or keys from :py:class:`ConfigParams`.
*External to cluster or project*
The `Topology` is compiled using the Streams build service and submitted
to a Streams service instance running in Cloud Pak for Data.
Environment variables:
These environment variables define how the application is built and submitted.
* **CP4D_URL** - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`
* **STREAMS_INSTANCE_ID** - Streams service instance name.
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Cloud Pak for Data standalone configuration
The `Topology` is compiled using the Streams build service and submitted
to a Streams service instance using REST apis.
Environment variables:
These environment variables define how the application is built and submitted.
* **STREAMS_BUILD_URL** - Streams build service URL, e.g. when the service is exposed as node port: `https://<NODE-IP>:<NODE-PORT>`
* **STREAMS_REST_URL** - Streams SWS service (REST API) URL, e.g. when the service is exposed as node port: `https://<NODE-IP>:<NODE-PORT>`
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Streams on-premise 4.2 & 4.3
The `Topology` is compiled locally and the resultant Streams application bundle
(sab file) is submitted to an IBM Streams instance.
Environment variables:
These environment variables define how the application is built and submitted.
    * **STREAMS_INSTALL** - Location of an IBM Streams installation (4.2 or 4.3).
* **STREAMS_DOMAIN_ID** - Domain identifier for the Streams instance.
* **STREAMS_INSTANCE_ID** - Instance identifier.
* **STREAMS_ZKCONNECT** - (optional) ZooKeeper connection string for domain (when not using an embedded ZooKeeper)
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
.. warning::
``streamtool`` is used to submit the job with on-premise 4.2 & 4.3 Streams and requires that ``streamtool`` does not prompt for authentication. This is achieved by using ``streamtool genkey``.
.. seealso::
`Generating authentication keys for IBM Streams <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.1/com.ibm.streams.cfg.doc/doc/ibminfospherestreams-user-security-authentication-rsa.html>`_
"""
STANDALONE = 'STANDALONE'
"""Build and execute locally.
Compiles and executes the `Topology` locally in IBM Streams standalone mode as a separate sub-process.
    Typically used for development and testing.
    The call to :py:func:`submit` returns when (if) the application completes. An application
completes when it has finite source streams and all tuples from those streams have been
processed by the complete topology. If the source streams are infinite (e.g. reading tweets)
then the standalone application will not complete.
Environment variables:
    These environment variables define how the application is built.
    * **STREAMS_INSTALL** - Location of an IBM Streams installation (4.0.1 or later).
"""
BUNDLE = 'BUNDLE'
"""Create a Streams application bundle.
The `Topology` is compiled to produce Streams application bundle (sab file).
The resultant application can be submitted to:
* Streaming Analytics service using the Streams console or the Streaming Analytics REST api.
* IBM Streams instance using the Streams console, JMX api or command line ``streamtool submitjob``.
* Executed standalone for development or testing.
The bundle must be built on the same operating system version and architecture as the intended running
environment. For Streaming Analytics service this is currently RedHat/CentOS 7 and `x86_64` architecture.
    .. rubric:: IBM Cloud Pak for Data integrated configuration
*Projects (within cluster)*
The `Topology` is compiled using the Streams build service for
a Streams service instance running in the same Cloud Pak for
Data cluster as the Jupyter notebook or script declaring the application.
The instance is specified in the configuration passed into :py:func:`submit`. The code that selects a service instance by name is::
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
submit(ContextTypes.BUNDLE, topo, cfg)
The resultant `cfg` dict may be augmented with other values such as
keys from :py:class:`ConfigParams`.
*External to cluster or project*
The `Topology` is compiled using the Streams build service for a Streams service instance running in Cloud Pak for Data.
Environment variables:
These environment variables define how the application is built and submitted.
* **CP4D_URL** - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`
* **STREAMS_INSTANCE_ID** - Streams service instance name.
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Cloud Pak for Data standalone configuration
The `Topology` is compiled using the Streams build service.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_BUILD_URL** - Streams build service URL, e.g. when the service is exposed as node port: `https://<NODE-IP>:<NODE-PORT>`
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Streams on-premise 4.2 & 4.3
The `Topology` is compiled using a local IBM Streams installation.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_INSTALL** - Location of a local IBM Streams installation.
"""
TOOLKIT = 'TOOLKIT'
"""Creates an SPL toolkit.
`Topology` applications are implemented as an SPL application before compilation into an Streams application
bundle. This context type produces the intermediate SPL toolkit that is input to the SPL compiler for
bundle creation.
.. note::
`TOOLKIT` is typically only used when diagnosing issues with bundle generation.
"""
BUILD_ARCHIVE = 'BUILD_ARCHIVE'
"""Creates a build archive.
This context type produces the intermediate code archive used for bundle creation.
.. note::
`BUILD_ARCHIVE` is typically only used when diagnosing issues with bundle generation.
"""
EDGE = 'EDGE'
"""Submission to build service running on IBM Cloud Pak for Data to create an image for Edge.
The `Topology` is compiled and the resultant Streams application bundle
(sab file) is added to an image for Edge.
    .. rubric:: IBM Cloud Pak for Data integrated configuration
*Projects (within cluster)*
The `Topology` is compiled using the Streams build service for
a Streams service instance running in the same Cloud Pak for
Data cluster as the Jupyter notebook or script declaring the application.
The instance is specified in the configuration passed into :py:func:`submit`. The code that selects a service instance by name is::
from streamsx.topology.context import submit, ContextTypes
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
submit(ContextTypes.EDGE, topo, cfg)
The resultant `cfg` dict may be augmented with other values such as
keys from :py:class:`ConfigParams` or :py:class:`JobConfig`.
For example, apply `imageName` and `imageTag`::
from streamsx.topology.context import submit, ContextTypes, JobConfig
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
jc = JobConfig()
jc.raw_overlay = {'edgeConfig': {'imageName':'py-sample-app', 'imageTag':'v1.0'}}
jc.add(cfg)
submit(ContextTypes.EDGE, topo, cfg)
*External to cluster or project*
The `Topology` is compiled using the Streams build service for a Streams service instance running in Cloud Pak for Data.
Environment variables:
These environment variables define how the application is built and submitted.
* **CP4D_URL** - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`
* **STREAMS_INSTANCE_ID** - Streams service instance name.
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
Example code to query the base images::
from streamsx.build import BuildService
bs = BuildService.of_endpoint(verify=False)
baseImages = bs.get_base_images()
print('# images = ' + str(len(baseImages)))
for i in baseImages:
print(i.id)
print(i.registry)
Example code to select a base image for the image build::
from streamsx.topology.context import submit, ContextTypes, JobConfig
topo = Topology()
...
jc = JobConfig()
jc.raw_overlay = {'edgeConfig': {'imageName':'py-sample-app', 'imageTag':'v1.0', 'baseImage':'streams-base-edge-python-el7:5.3.0.0'}}
jc.add(cfg)
submit(ContextTypes.EDGE, topo, cfg)
.. rubric:: EDGE configuration
The dict *edgeConfig* supports the following fields that are used for the image creation:
* **imageName** - [str] name of the image
* **imageTag** - [str] name of the image tag
* **baseImage** - [str] identify the name of the base image
* **pipPackages** - [list] identify one or more Python install packages that are to be included in the image.
* **rpms** - [list] identify one or more linux RPMs that are to be included in the image
    * **locales** - [list] identify one or more locales that are to be included in the image. The first item in the list is the "default" locale. The locales are identified in the java format <language>_<country>_<variant>. Example: "en_US"
Example with adding pip packages and rpms::
jc.raw_overlay = {'edgeConfig': {'imageName': image_name, 'imageTag': image_tag, 'pipPackages':['pandas','numpy'], 'rpms':['atlas-devel']}}
"""
EDGE_BUNDLE = 'EDGE_BUNDLE'
"""Creates a Streams application bundle.
The `Topology` is compiled on build service running on IBM Cloud Pak for Data and the resultant Streams application bundle
(sab file) is downloaded.
.. note::
`EDGE_BUNDLE` is typically only used when diagnosing issues with applications for EDGE.
"""
class ConfigParams(object):
"""
Configuration options which may be used as keys in :py:func:`submit` `config` parameter.
"""
VCAP_SERVICES = 'topology.service.vcap'
"""Streaming Analytics service definitions including credentials in **VCAP_SERVICES** format.
Provides the connection credentials when connecting to a Streaming Analytics service
using context type :py:const:`~ContextTypes.STREAMING_ANALYTICS_SERVICE`.
The ``streaming-analytics`` service to use within the service definitions is identified
by name using :py:const:`SERVICE_NAME`.
The key overrides the environment variable **VCAP_SERVICES**.
The value can be:
* Path to a local file containing a JSON representation of the VCAP services information.
* Dictionary containing the VCAP services information.
.. seealso:: :ref:`sas-vcap`
"""
SERVICE_NAME = 'topology.service.name'
"""Streaming Analytics service name.
Selects the specific Streaming Analytics service from VCAP service definitions
    defined by the environment variable **VCAP_SERVICES** or the key :py:const:`VCAP_SERVICES` in the `submit` config.
.. seealso:: :ref:`sas-service-name`
"""
SPACE_NAME = 'topology.spaceName'
"""
    Key for a deployment space on Cloud Pak for Data, used when submitting to :py:const:`DISTRIBUTED`.
When a space name is specified for an application submitted from a project in Cloud Pak for Data,
for example from a Jupyter notebook, the resulting job will not be associated with the project and can
therefore not be found within the project. The job will be associated with a deployment space instead.
When the specified space does not exist, it will be automatically created.
.. versionadded:: 1.17
"""
CP4D_URL = 'topology.cp4d_url'
"""
    Key for specifying the URL of the Cloud Pak for Data deployment, used when submitting to :py:const:`DISTRIBUTED` from within a CP4D project.
.. versionadded:: 1.17
"""
FORCE_REMOTE_BUILD = 'topology.forceRemoteBuild'
"""Force a remote build of the application.
When submitting to :py:const:`STREAMING_ANALYTICS_SERVICE` a local build of the Streams application bundle
will occur if the environment variable **STREAMS_INSTALL** is set. Setting this flag to `True` ignores the
local Streams install and forces the build to occur remotely using the service.
"""
JOB_CONFIG = 'topology.jobConfigOverlays'
"""
Key for a :py:class:`JobConfig` object representing a job configuration for a submission.
"""
STREAMS_CONNECTION = 'topology.streamsConnection'
"""
Key for a :py:class:`StreamsConnection` object for connecting to a running IBM Streams instance. Only supported for Streams 4.2, 4.3. Requires environment
variable ``STREAMS_INSTANCE_ID`` to be set.
"""
SSL_VERIFY = 'topology.SSLVerify'
"""
Key for the SSL verification value passed to `requests` as its ``verify``
option for distributed contexts. By default set to `True`.
.. note:: Only ``True`` or ``False`` is supported. Behaviour is undefined
when passing a path to a CA_BUNDLE file or directory with
certificates of trusted CAs.
.. versionadded:: 1.11
"""
SERVICE_DEFINITION = 'topology.service.definition'
"""Streaming Analytics service definition.
Identifies the Streaming Analytics service to use. The definition can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console).
    Credentials are provided in JSON format. They contain information such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) created from the `service credentials`, for example with `json.loads(service_credentials)`
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": ... }``
with the `service credentials` as the value of the ``credentials`` key. The value of the ``credentials`` key can
be a JSON object (`dict`) or a `str` copied from the `Service credentials` page of the service console.
This key takes precedence over :py:const:`VCAP_SERVICES` and :py:const:`SERVICE_NAME`.
.. seealso:: :ref:`sas-service-def`
"""
SC_OPTIONS = 'topology.sc.options'
"""
Options to be passed to IBM Streams sc command.
A topology is compiled into a Streams application
bundle (`sab`) using the SPL compiler ``sc``.
Additional options to be passed to ``sc``
may be set using this key. The value can be a
single string option (e.g. ``--c++std=c++11`` to select C++ 11 compilation)
or a list of strings for multiple options.
Setting ``sc`` options may be required when invoking SPL operators
directly or testing SPL applications.
.. warning::
Options that modify the requested submission context (e.g. setting
a different main composite) or deprecated options should not be specified.
.. versionadded:: 1.12.10
"""
_SPLMM_OPTIONS = 'topology.internal.splmm_options'
"""
TBD
"""
class JobConfig(object):
"""
Job configuration.
    `JobConfig` allows configuration of the job that will result from
submission of a :py:class:`Topology` (application).
A `JobConfig` is set in the `config` dictionary passed to :py:func:`~streamsx.topology.context.submit`
using the key :py:const:`~ConfigParams.JOB_CONFIG`. :py:meth:`~JobConfig.add` exists as a convenience
method to add it to a submission configuration.
A `JobConfig` can also be used when submitting a Streams application
bundle through the Streaming Analytics REST API method :py:meth:`~streamsx.rest_primitives.StreamingAnalyticsService.submit_job`.
Args:
        job_name(str): The name that is assigned to the job. A job name must be unique within a Streams instance.
When set to `None` a system generated name is used.
job_group(str): The job group to use to control permissions for the submitted job.
preload(bool): Specifies whether to preload the job onto all resources in the instance, even if the job is
not currently needed on each. Preloading the job can improve PE restart performance if the PEs are
relocated to a new resource.
data_directory(str): Specifies the location of the optional data directory. The data directory is a path
within the cluster that is running the Streams instance.
tracing: Specify the application trace level. See :py:attr:`tracing`
        space_name(str): Specifies the name of a deployment space on a Cloud Pak for Data system with which the job is associated.
Example::
# Submit a job with the name NewsIngester
cfg = {}
job_config = JobConfig(job_name='NewsIngester')
job_config.add(cfg)
context.submit('STREAMING_ANALYTICS_SERVICE', topo, cfg)
.. seealso:: `Job configuration overlays reference <https://www.ibm.com/support/knowledgecenter/en/SSCRJU_4.2.1/com.ibm.streams.ref.doc/doc/submitjobparameters.html>`_
"""
def __init__(self, job_name=None, job_group=None, preload=False, data_directory=None, tracing=None, space_name=None):
self.job_name = job_name
self.job_group = job_group
self.preload = preload
self.data_directory = data_directory
self.tracing = tracing
self._space_name = space_name
self._pe_count = None
self._raw_overlay = None
self._submission_parameters = dict()
self._comment = None
@staticmethod
def from_overlays(overlays):
"""Create a `JobConfig` instance from a full job configuration
overlays object.
All logical items, such as ``comment`` and ``job_name``, are
extracted from `overlays`. The remaining information in the
single job config overlay in ``overlays`` is set as ``raw_overlay``.
Args:
overlays(dict): Full job configuration overlays object.
Returns:
JobConfig: Instance representing logical view of `overlays`.
.. versionadded:: 1.9
"""
jc = JobConfig()
jc.comment = overlays.get('comment')
if 'jobConfigOverlays' in overlays:
if len(overlays['jobConfigOverlays']) >= 1:
jco = copy.deepcopy(overlays['jobConfigOverlays'][0])
# Now extract the logical information
if 'jobConfig' in jco:
_jc = jco['jobConfig']
jc.job_name = _jc.pop('jobName', None)
jc.job_group = _jc.pop('jobGroup', None)
jc.preload = _jc.pop('preloadApplicationBundles', False)
jc.data_directory = _jc.pop('dataDirectory', None)
jc.tracing = _jc.pop('tracing', None)
for sp in _jc.pop('submissionParameters', []):
jc.submission_parameters[sp['name']] = sp['value']
if not _jc:
del jco['jobConfig']
if 'deploymentConfig' in jco:
_dc = jco['deploymentConfig']
if 'manual' == _dc.get('fusionScheme'):
if 'fusionTargetPeCount' in _dc:
jc.target_pe_count = _dc.pop('fusionTargetPeCount')
if len(_dc) == 1:
del jco['deploymentConfig']
if jco:
jc.raw_overlay = jco
return jc
@property
def space_name(self):
"""
The deployment space of a Cloud Pak for Data that the job will be associated with.
"""
return self._space_name
@space_name.setter
def space_name(self, space_name):
self._space_name = space_name
@property
def tracing(self):
"""
Runtime application trace level.
The runtime application trace level can be a string with value ``error``, ``warn``, ``info``,
``debug`` or ``trace``.
        In addition, a level from the Python ``logging`` module can be used, with ``CRITICAL`` and ``ERROR`` mapping
to ``error``, ``WARNING`` to ``warn``, ``INFO`` to ``info`` and ``DEBUG`` to ``debug``.
Setting tracing to `None` or ``logging.NOTSET`` will result in the job submission using the Streams instance
application trace level.
The value of ``tracing`` is the level as a string (``error``, ``warn``, ``info``, ``debug`` or ``trace``)
or None.
"""
return self._tracing
@tracing.setter
def tracing(self, level):
if level is None:
pass
elif level in {'error', 'warn', 'info', 'debug', 'trace'}:
pass
elif level == logging.CRITICAL or level == logging.ERROR:
level = 'error'
elif level == logging.WARNING:
level = 'warn'
elif level == logging.INFO:
level = 'info'
elif level == logging.DEBUG:
level = 'debug'
elif level == logging.NOTSET:
level = None
else:
raise ValueError("Tracing value {0} not supported.".format(level))
self._tracing = level
@property
def target_pe_count(self):
"""Target processing element count.
When submitted against a Streams instance `target_pe_count` provides
a hint to the scheduler as to how to partition the topology
across processing elements (processes) for the job execution. When a job
contains multiple processing elements (PEs) then the Streams scheduler can
        distribute the PEs across the resources (hosts) running in the instance.
When set to ``None`` (the default) no hint is supplied to the scheduler.
The number of PEs in the submitted job will be determined by the scheduler.
The value is only a target and may be ignored when the topology contains
:py:meth:`~Stream.isolate` calls.
.. note::
Only supported in Streaming Analytics service and IBM Streams 4.2 or later.
"""
if self._pe_count is None:
return None
return int(self._pe_count)
@target_pe_count.setter
def target_pe_count(self, count):
if count is not None:
count = int(count)
if count < 1:
raise ValueError("target_pe_count must be greater than 0.")
self._pe_count = count
@property
def raw_overlay(self):
"""Raw Job Config Overlay.
A submitted job is configured using Job Config Overlay which
is represented as a JSON. `JobConfig` exposes Job Config Overlay
logically with properties such as ``job_name`` and ``tracing``.
This property (as a ``dict``) allows merging of the
configuration defined by this object and raw representation
of a Job Config Overlay. This can be used when a capability
of Job Config Overlay is not exposed logically through this class.
For example, the threading model can be set by::
jc = streamsx.topology.context.JobConfig()
jc.raw_overlay = {'deploymentConfig': {'threadingModel': 'manual'}}
Any logical items set by this object **overwrite** any set with
        ``raw_overlay``. For example, this sets the job name to
        the value set in the constructor (`DBIngest`), not the value
in ``raw_overlay`` (`Ingest`)::
jc = streamsx.topology.context.JobConfig(job_name='DBIngest')
jc.raw_overlay = {'jobConfig': {'jobName': 'Ingest'}}
        .. note:: Contents of ``raw_overlay`` must be a ``dict`` that
            matches a single Job Config Overlay and is serializable
as JSON to the correct format.
.. seealso:: `Job Config Overlay reference <https://www.ibm.com/support/knowledgecenter/en/SSCRJU_4.2.1/com.ibm.streams.ref.doc/doc/submitjobparameters.html>`_
.. versionadded:: 1.9
"""
return self._raw_overlay
@raw_overlay.setter
def raw_overlay(self, raw):
self._raw_overlay = raw
@property
def submission_parameters(self):
"""Job submission parameters.
Submission parameters values for the job. A `dict` object
that maps submission parameter names to values.
.. versionadded:: 1.9
"""
return self._submission_parameters
@property
def comment(self):
"""
Comment for job configuration.
The comment does not change the functionality of the job configuration.
Returns:
str: Comment text, `None` if it has not been set.
.. versionadded:: 1.9
"""
return self._comment
@comment.setter
def comment(self, value):
if value:
self._comment = str(value)
else:
self._comment = None
def add(self, config):
"""
Add this `JobConfig` into a submission configuration object.
Args:
config(dict): Submission configuration.
Returns:
dict: config.
"""
config[ConfigParams.JOB_CONFIG] = self
if self.space_name:
config[ConfigParams.SPACE_NAME] = self.space_name
return config
def as_overlays(self):
"""Return this job configuration as a complete job configuration overlays object.
Converts this job configuration into the full format supported by IBM Streams.
The returned `dict` contains:
* ``jobConfigOverlays`` key with an array containing a single job configuration overlay.
* an optional ``comment`` key containing the comment ``str``.
For example with this ``JobConfig``::
jc = JobConfig(job_name='TestIngester')
jc.comment = 'Test configuration'
jc.target_pe_count = 2
the returned `dict` would be::
{"comment": "Test configuration",
"jobConfigOverlays":
[{"jobConfig": {"jobName": "TestIngester"},
"deploymentConfig": {"fusionTargetPeCount": 2, "fusionScheme": "manual"}}]}
The returned overlays object can be saved as JSON in a file
using ``json.dump``. A file can be used with job submission
mechanisms that support a job config overlays file, such as
``streamtool submitjob`` or the IBM Streams console.
Example of saving a ``JobConfig`` instance as a file::
jc = JobConfig(job_name='TestIngester')
with open('jobconfig.json', 'w') as f:
json.dump(jc.as_overlays(), f)
Returns:
dict: Complete job configuration overlays object built from this object.
.. versionadded:: 1.9
"""
return self._add_overlays({})
def _add_overlays(self, config):
"""
Add this as a jobConfigOverlays JSON to config.
"""
if self._comment:
config['comment'] = self._comment
jco = {}
config["jobConfigOverlays"] = [jco]
if self._raw_overlay:
jco.update(self._raw_overlay)
jc = jco.get('jobConfig', {})
if self.job_name is not None:
jc["jobName"] = self.job_name
if self.job_group is not None:
jc["jobGroup"] = self.job_group
if self.data_directory is not None:
jc["dataDirectory"] = self.data_directory
if self.preload:
jc['preloadApplicationBundles'] = True
if self.tracing is not None:
jc['tracing'] = self.tracing
if self.submission_parameters:
sp = jc.get('submissionParameters', [])
for name in self.submission_parameters:
sp.append({'name': str(name), 'value': self.submission_parameters[name]})
jc['submissionParameters'] = sp
if jc:
jco["jobConfig"] = jc
if self.target_pe_count is not None and self.target_pe_count >= 1:
deployment = jco.get('deploymentConfig', {})
deployment.update({'fusionScheme' : 'manual', 'fusionTargetPeCount' : self.target_pe_count})
jco["deploymentConfig"] = deployment
return config
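# Hedged sketch: JobConfig.as_overlays() and JobConfig.from_overlays() round-trip the
# logical settings; the job name and PE count below are illustrative.
def _example_jobconfig_roundtrip():
    jc = JobConfig(job_name='TestIngester')
    jc.target_pe_count = 2
    overlays = jc.as_overlays()
    jc2 = JobConfig.from_overlays(overlays)
    # -> ('TestIngester', 2)
    return jc2.job_name, jc2.target_pe_count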
class SubmissionResult(object):
"""Passed back to the user after a call to submit.
Allows the user to use dot notation to access dictionary elements.
Example accessing result files when using :py:const:`~ContextTypes.BUNDLE`::
submission_result = submit(ContextTypes.BUNDLE, topology, config)
print(submission_result.bundlePath)
...
os.remove(submission_result.bundlePath)
os.remove(submission_result.jobConfigPath)
Result contains the generated toolkit location when using :py:const:`~ContextTypes.TOOLKIT`::
submission_result = submit(ContextTypes.TOOLKIT, topology, config)
print(submission_result.toolkitRoot)
Result when using :py:const:`~ContextTypes.DISTRIBUTED` depends if the `Topology` is compiled locally and the resultant Streams application bundle
(sab file) is submitted to an IBM Streams instance or if the `Topology` is compiled on build-service and submitted to an instance in Cloud Pak for Data::
submission_result = submit(ContextTypes.DISTRIBUTED, topology, config)
print(submission_result)
Result contains the generated `image`, `imageDigest`, `submitMetrics` (building the bundle), `submitImageMetrics` (building the image) when using :py:const:`~ContextTypes.EDGE`::
submission_result = submit(ContextTypes.EDGE, topology, config)
print(submission_result.image)
print(submission_result.imageDigest)
"""
def __init__(self, results):
self.results = results
self._submitter = None
@property
def job(self):
"""REST binding for the job associated with the submitted build.
Returns:
Job: REST binding for running job or ``None`` if connection information was not available or no job was submitted.
"""
if self._submitter and hasattr(self._submitter, '_job_access'):
return self._submitter._job_access()
return None
def cancel_job_button(self, description=None):
"""Display a button that will cancel the submitted job.
Used in a Jupyter IPython notebook to provide an interactive
mechanism to cancel a job submitted from the notebook.
Once clicked the button is disabled unless the cancel fails.
A job may be cancelled directly using::
submission_result = submit(ctx_type, topology, config)
submission_result.job.cancel()
Args:
description(str): Text used as the button description, defaults to value based upon the job name.
.. warning::
Behavior when called outside a notebook is undefined.
.. versionadded:: 1.12
"""
if not hasattr(self, 'jobId'):
return
try:
            # Verify we are in an IPython env.
get_ipython() # noqa : F821
import ipywidgets as widgets
if not description:
description = 'Cancel job: '
description += self.name if hasattr(self, 'name') else self.job.name
button = widgets.Button(description=description,
button_style='danger',
layout=widgets.Layout(width='40%'))
out = widgets.Output()
vb = widgets.VBox([button, out])
@out.capture(clear_output=True)
def _cancel_job_click(b):
b.disabled=True
print('Cancelling job: id=' + str(self.job.id) + ' ...\n', flush=True)
try:
rc = self.job.cancel()
out.clear_output()
if rc:
print('Cancelled job: id=' + str(self.job.id) + ' : ' + self.job.name + '\n', flush=True)
else:
print('Job already cancelled: id=' + str(self.job.id) + ' : ' + self.job.name + '\n', flush=True)
except:
b.disabled=False
out.clear_output()
raise
button.on_click(_cancel_job_click)
display(vb) # noqa : F821
except:
pass
def __getattr__(self, key):
if key in self.__getattribute__("results"):
return self.results[key]
return self.__getattribute__(key)
def __setattr__(self, key, value):
if "results" in self.__dict__:
results = self.results
results[key] = value
else:
super(SubmissionResult, self).__setattr__(key, value)
def __getitem__(self, item):
return self.__getattr__(item)
def __setitem__(self, key, value):
return self.__setattr__(key, value)
def __delitem__(self, key):
if key in self.__getattribute__("results"):
del self.results[key]
return
self.__delattr__(key)
def __contains__(self, item):
return item in self.results
def __repr__(self):
r = copy.copy(self.results)
if 'streamsConnection' in r:
del r['streamsConnection']
return r.__repr__()
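# Hedged sketch of SubmissionResult's dict/attribute duality: keys of the wrapped
# results dict are reachable by attribute and by item access. The keys and values
# shown are hypothetical samples.
def _example_submission_result_access():
    sr = SubmissionResult({'jobId': '7', 'bundlePath': '/tmp/app.sab'})
    # -> ('7', '/tmp/app.sab', True)
    return sr.jobId, sr['bundlePath'], 'jobId' in sr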
def _vcap_from_service_definition(service_def):
"""Turn a service definition into a vcap services
containing a single service.
"""
if 'credentials' in service_def:
credentials = service_def['credentials']
else:
credentials = service_def
service = {}
service['credentials'] = credentials if isinstance(credentials, dict) else json.loads(credentials)
service['name'] = _name_from_service_definition(service_def)
vcap = {'streaming-analytics': [service]}
return vcap
def _name_from_service_definition(service_def):
return service_def['name'] if 'credentials' in service_def else 'service'
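# Illustrative sketch: a hypothetical service definition is wrapped by the helpers above
# into a single-entry VCAP services structure of the form
# {'streaming-analytics': [{'name': ..., 'credentials': {...}}]}.
def _example_vcap_wrapping():
    service_def = {'type': 'streaming-analytics', 'name': 'my-service',
                   'credentials': {'apikey': '...', 'v2_rest_url': 'https://example/v2'}}
    return _vcap_from_service_definition(service_def)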
class _SasBundleSubmitter(_BaseSubmitter):
"""
A submitter which supports the BUNDLE context
for Streaming Analytics service.
"""
def __init__(self, config, graph):
_BaseSubmitter.__init__(self, 'SAS_BUNDLE', config, graph)
self._remote = config.get(ConfigParams.FORCE_REMOTE_BUILD) or \
            'STREAMS_INSTALL' not in os.environ
def _get_java_env(self):
"Set env vars from connection if set"
env = super(_SasBundleSubmitter, self)._get_java_env()
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
def run(topology, config=None, job_name=None, verify=None, ctxtype=ContextTypes.DISTRIBUTED):
"""
Run a topology in a distributed Streams instance.
    Runs a topology using :py:func:`submit` with context type :py:const:`~ContextTypes.DISTRIBUTED` (by default). The result is a running Streams job.
Args:
topology(Topology): Application topology to be run.
config(dict): Configuration for the build.
job_name(str): Optional job name. If set will override any job name in `config`.
verify: SSL verification used by requests when using a build service. Defaults to enabling SSL verification.
ctxtype(str): Context type for submission.
Returns:
2-element tuple containing
- **job** (*Job*): REST binding object for the running job or ``None`` if no job was submitted.
- **result** (*SubmissionResult*): value returned from ``submit``.
.. seealso:: :py:const:`~ContextTypes.DISTRIBUTED` for details on how to configure the Streams instance to use.
.. versionadded:: 1.14
"""
config = config.copy() if config else dict()
if job_name:
if ConfigParams.JOB_CONFIG in config:
# Ensure the original is not changed
jc = JobConfig.from_overlays(config[ConfigParams.JOB_CONFIG].as_overlays())
jc.job_name = job_name
jc.add(config)
else:
JobConfig(job_name=job_name).add(config)
if verify is not None:
config[ConfigParams.SSL_VERIFY] = verify
sr = submit(ctxtype, topology, config=config)
return sr.job, sr
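# Hedged usage sketch for run(): assumes a Topology named 'topo' has already been
# declared and that one of the DISTRIBUTED environments described above is configured.
# The job name is illustrative.
def _example_run_distributed(topo):
    job, result = run(topo, job_name='SampleJob', verify=False)
    return job, result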
| apache-2.0 |
girish946/plot-cat | setup.py | 1 | 1808 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
description = '''
plot-cat is a Python library for plotting live serial input. plotcat works on
Python 2.7 and later. plotcat comes in handy when you want to plot live data that
is coming from different sensors over the serial port, SPI, websocket, TCP socket etc.
For example, you may have to plot the output of a temperature sensor that is coming
from an [arduino](https://www.arduino.cc/) or any other microcontroller for
that matter; plotcat comes in handy for such tasks.
plotcat sits on top of [matplotlib](http://matplotlib.org/) and does all
the initialization and drawing itself. You just have to provide the list
of values to be plotted.
plotcat works on Linux, OS X and Windows. plotcat also works well with
the [Raspberry Pi](https://www.raspberrypi.org/).
## install plotcat
from pip
```bash
pip install plotcat
```
or from github
```bash
git clone https://github.com/girish946/plot-cat.git
pip install -r requirements.txt
python setup.py install
```
on fedora 22 and above
```bash
dnf copr enable girish946/plotcat
dnf install python-plotcat
```
for ubuntu
```bash
wget https://github.com/girish946/plot-cat/blob/master/dist/python-plotcat_1.0.0.1-1_all.deb
sudo dpkg -i python-plotcat_1.0.0.1-1_all.deb
```
'''
setup(
name = 'plotcat',
version = '1.0.5',
author = 'girish joshi',
author_email = '[email protected]',
description = ("""tool to plot live serial input"""),
packages = ['plotcat'],
install_requires = ['matplotlib', 'pyserial'],
keywords = 'serial input plotting realtime data matplotlib Raspberry_pi ',
scripts = ['live_plot.py'],
long_description = """tool to plot live serial input""",
license="GPL v3",
)
| gpl-3.0 |
why2012/imgRecog | scantronAnalyzeCV.py | 1 | 35087 | #!/usr/bin/env python
#coding:utf-8
import cv2
import numpy as np
from sklearn.cluster import KMeans
def readImg(path):
return cv2.imread(path)
def grayImg(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Image width corresponds to the number of array columns, image height to the number of rows
def getImgSize(img):
return (img.shape[1], img.shape[0])
def swap(a, b):
return b, a
# Create a pure white image from (width, height); width/height map to array columns/rows
def createWhiteImg(size):
return np.uint8(np.ones((size[1], size[0])) * 255)
def createBlackImg(size):
return np.uint8(np.zeros((size[1], size[0])))
def findContours(grayImg, mode = cv2.RETR_EXTERNAL, method = cv2.CHAIN_APPROX_SIMPLE):
_, contours, hierarchy = cv2.findContours(grayImg, mode, method)
return contours, hierarchy
def drawContours(img, contours, color = (0, 0, 0), thickness = -1):
cv2.drawContours(img, contours, -1, color, thickness)
return img
def binaryInv(grayImg):
thresh, bimg = cv2.threshold(grayImg, 0, 255, cv2.THRESH_OTSU)
return bimg
def binaryThresh(grayImg, thresh1 = 0, thresh2 = 255, mode = cv2.THRESH_BINARY):
thresh, bimg = cv2.threshold(grayImg, thresh1, thresh2, mode)
return bimg
def getKernel(size):
kernel = np.uint8(np.zeros(size))
kw, kh = kernel.shape
for x in range(kh):
kernel[x, kw / 2] = 1
for x in range(kw):
kernel[kh / 2, x] = 1
return kernel
# Order the rectangle vertices as top-left, top-right, bottom-right, bottom-left
def order_points(pts):
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
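# Worked example (assumed points) of the sum/difference trick used by order_points:
# the smallest x+y is the top-left corner, the largest x+y the bottom-right, the
# smallest y-x the top-right and the largest y-x the bottom-left.
def _example_order_points():
    pts = np.array([[10, 100], [10, 10], [100, 10], [100, 100]], dtype="float32")
    # -> [[10, 10], [100, 10], [100, 100], [10, 100]]
    return order_points(pts)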
def erosion(grayImg, kernel = getKernel((4, 4)), iterations = 1):
return cv2.erode(grayImg, kernel, iterations = iterations)
def dilation(grayImg, kernel = getKernel((4, 4)), iterations = 1):
return cv2.dilate(grayImg, kernel, iterations = iterations)
def sobel(grayImg):
gradX = cv2.Sobel(grayImg, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(grayImg, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
return gradient
def showImg(*imgs):
index = 0
for img in imgs:
cv2.imshow("img" + str(index), img)
index += 1
cv2.waitKey(10)
cv2.destroyAllWindows()
def showImgSingle(img, wname = "img"):
cv2.imshow(wname, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Get the bounding rectangles of the contours
def getBoundingRect(contours):
boundingRect = []
for contour in contours:
boundingRect.append(cv2.boundingRect(contour))
return boundingRect
# lines[ line[ subline[]], ...]
def drawLines(img, lines, color = (0, 0, 0), thickness = 1):
for line in lines:
for subline in line:
cv2.line(img, (subline[0], subline[1]), (subline[2], subline[3]), color, thickness)
# boundingBox = [x, y, w, h] -> [x1, y1, x2, y2]
def convertBoundingBoxToBox(boundingBox):
realBox = []
for box in boundingBox:
realBox.append([box[0], box[1], box[0] + box[2], box[1] + box[3]])
return realBox
def drawBox(img, boxes, color = (0, 0, 0), thickness = 1):
for box in boxes:
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, thickness)
def drawRectP4(img, points, color = (0, 0, 0), thickness = 1):
lines = [[points[0], points[1]], [points[1], points[2]], [points[2], points[3]], [points[3], points[0]]]
for line in lines:
cv2.line(img, (line[0][0], line[0][1]), (line[1][0], line[1][1]), color, thickness)
# Get line segment endpoints from polar coordinates
def getLinesFromPolarCoord(polarLines, thresh = 4000):
lines = []
for rhoThetaPack in polarLines:
subline = []
for rho,theta in rhoThetaPack:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + thresh * (-b))
y1 = int(y0 + thresh * (a))
x2 = int(x0 - thresh * (-b))
y2 = int(y0 - thresh * (a))
subline.append([x1, y1, x2, y2])
lines.append(subline)
return lines
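# Worked example (assumed polar line) for getLinesFromPolarCoord: rho=10, theta=0
# describes the vertical line x = 10, stretched +-4000 pixels along y.
def _example_polar_to_segment():
    # -> [[[10, 4000, 10, -4000]]]
    return getLinesFromPolarCoord([[(10.0, 0.0)]])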
# Find boxes whose area exceeds a threshold fraction of the image area
def findMainBox(boundingBox, size, thresh = 2):
boxes = []
S = size[0] * size[1]
for box in boundingBox:
for x1, y1, x2, y2 in box:
w = np.abs(x2 - x1)
h = np.abs(y2 - y1)
s = w * h
if s > S / thresh:
boxes.append(box)
return boxes
# Extend short line segments; d == 0 means vertical
def expandLine(lines, thresh):
newLines = []
if lines is None:
return newLines
for sublines in lines:
newSublines = []
for x1, y1, x2, y2 in sublines:
if x1 > x2:
x1, x2 = swap(x1, x2)
y1, y2 = swap(y1, y2)
newLine = [0] * 4
d1 = float(x2 - x1)
d2 = float(y2 - y1)
            # Vertical
if d1 >= -5 and d1 <= 5:
newLine = [x1, y1, x2, y2]
f = newLine[1] - newLine[3]
f = int(float(f) / np.abs(f))
newLine[1] = newLine[1] + f * thresh
newLine[3] = newLine[3] - f * thresh
else:
deltaY1 = d2 / d1 * x1
deltaY2 = d2 / d1 * thresh
newLine[0] = 0
newLine[1] = int(y1 - deltaY1)
newLine[2] = int(x2 + thresh)
newLine[3] = int(y2 + deltaY2)
newSublines.append(newLine)
newLines.append(newSublines)
return newLines
# Keep only lines whose inclination falls within the given angle ranges; absolute angles are used
def remainLine(lines, angles = [[0, 10], [80, 90]]):
if lines is None:
return lines
PI_DEGREE = 180.0 / np.pi
newLines = []
for sublines in lines:
newSublines = []
for x1, y1, x2, y2 in sublines:
newLine = [x1, y1, x2, y2]
if x1 - x2 == 0:
theta = 90
else:
theta = np.arctan(np.abs(float(y1 - y2) / (x1 - x2))) * PI_DEGREE
for angle in angles:
if theta >= angle[0] and theta <= angle[1]:
newSublines.append(newLine)
break
if len(newSublines) != 0:
newLines.append(newSublines)
return newLines
# Compute the angle between two lines
def computeLineAngle(line1, line2):
dx1 = line1[2] - line1[0]
dx2 = line2[2] - line2[0]
dy1 = line1[3] - line1[1]
dy2 = line2[3] - line2[1]
dx1 /= 100
dx2 /= 100
dy1 /= 100
dy2 /= 100
theta = np.arccos((dx1 * dx2 + dy1 * dy2) / np.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2)))
theta = theta * 180.0 / np.pi
return theta
# Intersection point of two lines
# If segment == True, compute the segment intersection, allowing an error of size thresh
def computeIntersect(a, b, segment = False, thresh = 100):
x1 = a[0]; y1 = a[1]; x2 = a[2]; y2 = a[3]; x3 = b[0]; y3 = b[1]; x4 = b[2]; y4 = b[3]
d = float((x1 - x2) * (y3 - y4)) - (y1 - y2) * (x3 - x4)
if d != 0:
pt = [0, 0]
pt[0] = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / d
pt[1] = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / d
if segment:
x = [x1, x2, x3, x4]
y = [y1, y2, y3, y4]
x.remove(min(x))
x.remove(max(x))
y.remove(min(y))
y.remove(max(y))
x.sort()
y.sort()
x[0] -= thresh
x[1] += thresh
y[0] -= thresh
y[1] += thresh
if (pt[0] < x[0] or pt[0] > x[1]) or (pt[1] < y[0] or pt[1] > y[1]):
pt = [-1, -1]
return pt
else:
return [-1, -1]
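# Small worked example (assumed coordinates) for computeIntersect: the segments
# (0,0)-(10,10) and (0,10)-(10,0) cross at (5, 5).
def _example_compute_intersect():
    # -> [5, 5]
    return computeIntersect([0, 0, 10, 10], [0, 10, 10, 0], segment=True)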
# Intersection points of line segments
# thresh1 adjusts how far an intersection may lie outside the bounds
def getIntersectPoints(lines, filterSize = None, thresh1 = 0):
length = len(lines)
points = []
for i in range(length):
for n in range(i, length):
            # Prevent intersections of nearly parallel lines
angle = computeLineAngle(lines[i][0], lines[n][0])
thresh0 = 60
if angle <= thresh0 or angle >= 180 - thresh0:
continue
point = computeIntersect(lines[i][0], lines[n][0], True)
if point[0] != -1:
if filterSize:
if point[0] >= 0 - thresh1 and point[0] <= filterSize[0] + thresh1 and point[1] >= 0 - thresh1 and point[1] <= filterSize[1] + thresh1:
points.append(point)
else:
points.append(point)
return points
# Corner points of the bounding area
def getBoundingCornerPoints(points, size):
points = np.array(points)
centerP = np.array([size[0] / 2.0, size[1] / 2.0])
topLeftPoint = [[0, 0], 0]
topRightPoint = [[size[0], 0], 0]
bottomRightPoint = [[size[0], size[1]], 0]
bottomLeftPoint = [[0, size[1]], 0]
for point in points:
length = np.sum((point - centerP) * (point - centerP))
# topLeft
if point[0] <= centerP[0] and point[1] <= centerP[1]:
if length > topLeftPoint[1]:
topLeftPoint[0] = point
topLeftPoint[1] = length
# topRight
elif point[0] >= centerP[0] and point[1] <= centerP[1]:
if length > topRightPoint[1]:
topRightPoint[0] = point
topRightPoint[1] = length
# bottomRight
elif point[0] >= centerP[0] and point[1] >= centerP[1]:
if length > bottomRightPoint[1]:
bottomRightPoint[0] = point
bottomRightPoint[1] = length
# bottomLeft
else:
if length > bottomLeftPoint[1]:
bottomLeftPoint[0] = point
bottomLeftPoint[1] = length
return np.array([topLeftPoint[0], topRightPoint[0], bottomRightPoint[0], bottomLeftPoint[0]], dtype = np.float32)
# Find the mode, allowing a +-thresh error; if merge != 0, merge the counts of the first `merge` results of each sign (+/-) and return only the two merged results
# [(elem, [count, [indexes]]), ...]
# merge: [([elem1, elem2, ...], [count, [indexes]]), ...]
def findMode(data, thresh = 1, merge = 2):
# {[count, [indexes]]}
elemMap = {}
for index, elem in enumerate(data):
finded = False
if elem in elemMap:
elemMap[elem][0] += 1
elemMap[elem][1].append(index)
finded = True
elif thresh != 0:
if elem + thresh in elemMap:
elemMap[elem + thresh][0] += 1
elemMap[elem + thresh][1].append(index)
finded = True
if elem - thresh in elemMap:
elemMap[elem - thresh][0] += 1
elemMap[elem - thresh][1].append(index)
finded = True
if not finded:
elemMap[elem] = [1, [index]]
# [(elem, [count, [indexes]])]
elemMap = sorted(elemMap.items(), key=lambda e:e[1][0], reverse=True)
if merge != 0:
if merge < 0:
merge = max(0, len(elemMap) + merge)
newElemMap = [([], [0, []]), ([], [0, []])]
for index, modeR in enumerate(elemMap):
if modeR[0] >= 0:
newElemMap[0][0].append(modeR[0])
newElemMap[0][1][0] += modeR[1][0]
newElemMap[0][1][1].extend(modeR[1][1])
else:
newElemMap[1][0].append(modeR[0])
newElemMap[1][1][0] += modeR[1][0]
newElemMap[1][1][1].extend(modeR[1][1])
if index >= merge:
break
elemMap = newElemMap
return elemMap
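# Worked example (assumed data) for findMode: values within +-thresh of an existing
# key are counted together, and with merge != 0 the positive and negative results are
# collapsed into two buckets of (elements, [count, [indexes]]).
def _example_find_mode():
    # -> [([2], [3, [0, 1, 2]]), ([-1], [2, [3, 4]])]
    return findMode([2, 2, 3, -1, -1], thresh=1, merge=2)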
# Inclination angles of lines
def findAngle(lines):
angles = []
if lines is None:
return angles
for sublines in lines:
for x1, y1, x2, y2 in sublines:
length = np.sqrt((y1 - y2) * (y1 - y2) + (x1 - x2) * (x1 - x2))
angle = np.arcsin((y1 - y2) / length) * 180 / np.pi
angles.append(np.round(angle))
return angles
def getSlideWindow(arr, windowIndex, unitSize):
if len(arr) < unitSize or windowIndex >= len(arr) or windowIndex < 0:
return []
if windowIndex + unitSize - 1 < len(arr):
return arr[windowIndex: unitSize + windowIndex]
else:
windowArr = []
windowArr.extend(arr[windowIndex: len(arr)])
leftCount = unitSize - (len(arr) - windowIndex);
windowArr.extend(arr[0: leftCount])
return windowArr
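# Worked example (assumed list) for getSlideWindow: the window wraps around the end of
# the list, so index 3 with size 3 over [1, 2, 3, 4, 5] yields [4, 5, 1].
def _example_slide_window():
    return getSlideWindow([1, 2, 3, 4, 5], 3, 3)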
# Skew correction magnitudes
def getSkewScale(topLeft, topRight, bottomRight, bottomLeft):
topAvg = 0.5 * (topLeft[1] + topRight[1])
bottomAvg = 0.5 * (bottomLeft[1] + bottomRight[1])
leftAvg = 0.5 * (topLeft[0] + bottomLeft[0])
rightAvg = 0.5 * (topRight[0] + bottomRight[0])
    # Skew values at the four points: top-left, top-right, bottom-right, bottom-left
# return [[topLeft[0] - leftAvg, topLeft[1] - topAvg], [topRight[0] - rightAvg, topRight[1] - topAvg], [bottomRight[0] - rightAvg, bottomRight[1] - bottomAvg], [bottomLeft[0] - leftAvg, bottomLeft[1] - bottomAvg]]
    # Keep only the top skew values
# return [[0, topLeft[1] - topAvg], [0, topRight[1] - topAvg], [0, topRight[1] - topAvg], [0, topLeft[1] - topAvg]]
    # Keep only the averaged skew values
A = ((topLeft[0] - leftAvg) + (topRight[0] - rightAvg)) / 2.0
B = ((bottomRight[0] - rightAvg) + (bottomLeft[0] - leftAvg)) / 2.0
C = ((topLeft[1] - topAvg) + (bottomLeft[1] - bottomAvg)) / 2.0
D = ((topRight[1] - topAvg) + (bottomRight[1] - bottomAvg)) / 2.0
    # Cancel skew in the X direction; this targets images that bow inward or outward, flat images do not need it
A = B = 0
return [[A, C], [A, D], [B, D], [B, C]]
def determineBoxRatio(c1, c2, c3, c4, whRatio, thresh = 0.2):
# c1 = np.array(c1)
# c2 = np.array(c2)
# c3 = np.array(c3)
# c4 = np.array(c4)
# c = [c1, c2, c3, c4]
    # # Distance of the four circle centers from the origin
# len1 = np.sum(c1 * c1)
# len2 = np.sum(c2 * c2)
# len3 = np.sum(c3 * c3)
# len4 = np.sum(c4 * c4)
# lenTotal = [len1, len2, len3, len4]
    # # The minimum and maximum distances give the top-left and bottom-right points
# minLenIndex = np.argmin(lenTotal)
# maxLenIndex = np.argmax(lenTotal)
# leftIndex = [0, 1, 2, 3]
# leftIndex.remove(minLenIndex)
# leftIndex.remove(maxLenIndex)
# cLeft1 = c[leftIndex[0]]
# cLeft2 = c[leftIndex[1]]
    # # Of the remaining two points, the one with the larger y coordinate is the bottom-left
# if cLeft1[1] > cLeft2[1]:
# cLeft1, cLeft2 = (cLeft2, cLeft1)
# topLeft = np.array((c[minLenIndex][0], c[minLenIndex][1]))
# topRight = np.array((cLeft1[0], cLeft1[1]))
# bottomRight = np.array((c[maxLenIndex][0], c[maxLenIndex][1]))
# bottomLeft = np.array((cLeft2[0], cLeft2[1]))
topLeft = np.array([c1[0], c1[1]])
topRight = np.array([c2[0], c2[1]])
bottomRight = np.array([c3[0], c3[1]])
bottomLeft = np.array([c4[0], c4[1]])
    # Top/bottom widths and left/right heights (squared edge lengths)
w1 = np.sum((topRight - topLeft) * (topRight - topLeft))
w2 = np.sum((bottomRight - bottomLeft) * (bottomRight - bottomLeft))
h1 = np.sum((bottomLeft - topLeft) * (bottomLeft - topLeft))
h2 = np.sum((bottomRight - topRight) * (bottomRight - topRight))
    # Aspect ratio
staRatio = whRatio * whRatio
ratio1 = w1 / h1
ratio2 = w2 / h2
ratio3 = w1 / h2
ratio4 = w2 / h1
# diagLength = w * w + h * h
diagLength1 = np.sum((topLeft - bottomRight) * (topLeft - bottomRight))
diagLength2 = np.sum((topRight - bottomLeft) * (topRight - bottomLeft))
    # Aspect ratio check
whRatioBool = (ratio1 >= staRatio - thresh and ratio1 <= staRatio + thresh) and (ratio2 >= staRatio - thresh and ratio2 <= staRatio + thresh) \
and (ratio3 >= staRatio - thresh and ratio3 <= staRatio + thresh) and (ratio4 >= staRatio - thresh and ratio4 <= staRatio + thresh)
    # Absolute diagonal length
# diagAbsoluteLengthBool = (diagLength1 >= diagLength * (1 - thresh) and diagLength1 <= diagLength * (1 + thresh)) and (diagLength2 >= diagLength * (1 - thresh) and diagLength2 <= diagLength * (1 + thresh))
    # Relative diagonal length
diagLengthRatio = diagLength1 / diagLength2
diagLengthRatioBool = (diagLengthRatio >= 1 - thresh) and (diagLengthRatio <= 1 + thresh)
    # Variance of the radii
radiusArr = np.array((c1[2], c2[2], c3[2], c4[2]))
radiusVar = np.sum(np.power(radiusArr - np.array([np.average(radiusArr)] * 4), 2))
# if diagLengthRatioBool or True:
# print c1,c2,c3,c4
# print " topLeft, topRight: ", [topLeft.tolist(), topRight.tolist()]
# print "bottomLeft, bottomRight: ", [bottomLeft.tolist(), bottomRight.tolist()]
# print "w1, w2: ", [w1, w2]
# print "h1, h2: ", [h1, h2]
# print "staRatio: ", staRatio
# print "ratio1(w1/h1), ratio2(w2/h2), ratio3(w1/h2), ratio4(w2/h1): ", [ratio1, ratio2, ratio3, ratio4]
# print "ratioBool: ", whRatioBool
# # print "diagLength: ", diagLength
# print "diagLength1, diagLength2, diagLengthRatio", [diagLength1, diagLength2, diagLengthRatio]
# print "diagLengthRatioBool: ", diagLengthRatioBool
# print "----------"
    # Aspect ratio and diagonal length checks
if whRatioBool and diagLengthRatioBool:
        # Difference score
difference = np.abs(diagLengthRatio - thresh) + radiusVar
return True, (topLeft, topRight, bottomRight, bottomLeft), getSkewScale(topLeft, topRight, bottomRight, bottomLeft), difference
else:
return False, (), [], float('inf')
def determingCorrectCircles(circles, whRatio):
if len(circles) < 4:
return [], (), []
# for currentCircleIndex, currentCircle in enumerate(circles):
# leftCircles = circles[range(currentCircleIndex + 1, len(circles))].tolist();
# leftCircles.extend(circles[range(0, currentCircleIndex)])
# c1 = currentCircle
# for windowIndex in range(len(leftCircles)):
# c2, c3, c4 = getSlideWindow(leftCircles, windowIndex, 3)
# result, corners, skewScale = determineBoxRatio((c1[0], c1[1]), (c2[0], c2[1]), (c3[0], c3[1]), (c4[0], c4[1]), whRatio)
# if result:
# return corners, (c1, c2, c3, c4), skewScale
# return [], (), []
    # Collect the candidate circles in each of the four quadrants
topLeftCircles = []
topRightCircles = []
bottomLeftCircles = []
bottomRightCircles = []
minX = np.min(circles[:, 0])
maxX = np.max(circles[:, 0])
minY = np.min(circles[:, 1])
maxY = np.max(circles[:, 1])
centerX = (minX + maxX) / 2.0
centerY = (minY + maxY) / 2.0
for curCircle in circles:
x, y, r = curCircle
if x <= centerX and y <= centerY:
topLeftCircles.append(curCircle)
elif x >= centerX and y <= centerY:
topRightCircles.append(curCircle)
elif x >= centerX and y >= centerY:
bottomRightCircles.append(curCircle)
else:
bottomLeftCircles.append(curCircle)
# print topLeftCircles
# print topRightCircles
# print bottomRightCircles
# print bottomLeftCircles
correctResult = []
for circleTopLeft in topLeftCircles:
for circleTopRight in topRightCircles:
for circleBottomRight in bottomRightCircles:
for circleBottomLeft in bottomLeftCircles:
result, corners, skewScale, difference = determineBoxRatio(circleTopLeft, circleTopRight, circleBottomRight, circleBottomLeft, whRatio)
if result:
correctResult.append({"diff": difference, "corners": corners, "skewScale": skewScale, \
"circleTopLeft": circleTopLeft, "circleTopRight": circleTopRight, "circleBottomRight": circleBottomRight, \
"circleBottomLeft": circleBottomLeft})
#print correctResult
if correctResult:
        # Pick the candidate with the smallest difference score
correctResult = sorted(correctResult, key = lambda a: a["diff"])
minDiffResult = correctResult[0]
return minDiffResult["corners"], (minDiffResult["circleTopLeft"], minDiffResult["circleTopRight"], minDiffResult["circleBottomRight"], minDiffResult["circleBottomLeft"]), minDiffResult["skewScale"]
return [], (), []
# main function; paperW, paperH: width/height of the target region (relative scale), blockList = [(0.3,0.3,0.5,0.5), ] top-left/bottom-right corners
def houghTestCircle(originalImg, paperW, paperH, blockList = None, scaleThresh = 0.3):
imgSize = getImgSize(originalImg)
w, h = imgSize
# Scale down proportionally
w, h = (np.int(w * scaleThresh), np.int(h * scaleThresh))
# Debug
dw, dh = (int(paperW * scaleThresh * 2), int(paperH * scaleThresh * 2))
minWH = np.min((w, h))
originalImg = cv2.resize(originalImg, (w, h))
img = grayImg(originalImg)
# Debug: draw circles
imgColor = originalImg.copy()
# Debug: draw circles
imgColor02 = originalImg.copy()
# Cropped result
splitArea = np.array([])
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, minWH * 0.08, param1 = 60, param2 = 20, minRadius = int(np.ceil(3 * scaleThresh)), maxRadius = int(50 * scaleThresh))
# Keep only circles whose radius is above the (scaled) average
avgRadius = np.average(circles[0, :, 2]) * 0.9
circles = np.array([circles[0, circles[0, :, 2] >= avgRadius]])
# Debug: edges
img = cv2.Canny(img, 15, 60, apertureSize = 3)
# Determine the four corner circles
# skewScale: skew offsets of the four bounding-box corners; subtract them to correct the distortion
corners, correctCircles, skewScale = determingCorrectCircles(circles[0], float(paperW) / paperH)
corners = np.array(corners, dtype = np.float32)
# Unfiltered circles
print "circles: ", circles
# Average radius
print "avgRadius: ", avgRadius
# Filtered circles
print "correctCircles: ", correctCircles
# Centers of the filtered circles
print "corners: ", corners
# Skew values
print "skew: ", skewScale
# Draw the circles before filtering
if circles.any():
# Debug: draw circles
circles = np.uint16(np.around(circles))
for i in circles[0]:
cv2.circle(imgColor02,(i[0],i[1]),i[2],(0,255,0),2)
cv2.circle(imgColor02,(i[0],i[1]),2,(0,0,255),3)
# Draw the circles after filtering
blockListImg = []
if correctCircles:
# Debug: draw circles
correctCirclesUint = np.uint16(np.around(correctCircles))
for i in correctCirclesUint:
cv2.circle(imgColor,(i[0],i[1]),i[2],(0,255,0),2)
cv2.circle(imgColor,(i[0],i[1]),2,(0,0,255),3)
# Perspective-map and crop
transPs = np.array([[0, 0], [dw, 0], [dw, dh], [0, dh]], dtype = np.float32)
transform = cv2.getPerspectiveTransform(corners, transPs)
splitArea = cv2.warpPerspective(src = originalImg, M = transform, dsize = (dw, dh))
blockListImg.append(splitArea)
# Debug: crop the target sub-areas
if blockList and correctCircles:
for blockCorner in blockList:
# Opposite corners of the area to crop
topLeftX, topLeftY, bottomRightX, bottomRightY = np.array(blockCorner) * np.array([dw, dh, dw, dh])
# Width and height of the area to crop
tmpW = int(bottomRightX - topLeftX)
tmpH = int(bottomRightY - topLeftY)
# Four corners of the area to crop, without skew compensation
fourPoints = np.array([[topLeftX, topLeftY], [bottomRightX, topLeftY], [bottomRightX, bottomRightY], [topLeftX, bottomRightY]])
# Four corners of the area to crop, with skew compensation
# top-left, top-right, bottom-right, bottom-left
fourPointsAntiSkew = np.array([[topLeftX, topLeftY], [bottomRightX, topLeftY], [bottomRightX, bottomRightY], [topLeftX, bottomRightY]]) #- np.array(skewScale)
# Four corners of the output image
transPsTmp = np.array([[0, 0], [tmpW, 0], [tmpW, tmpH], [0,tmpH]], dtype = np.float32)
cornersTmp = np.array(fourPoints, dtype = np.float32)
cornersTmpSkew = np.array(fourPointsAntiSkew, dtype = np.float32)
transform = cv2.getPerspectiveTransform(cornersTmp, transPsTmp)
splitAreaTmp = cv2.warpPerspective(src = splitArea, M = transform, dsize = (tmpW, tmpH))
transformSkew = cv2.getPerspectiveTransform(cornersTmpSkew, transPsTmp)
splitAreaTmpSkew = cv2.warpPerspective(src = splitArea, M = transformSkew, dsize = (tmpW, tmpH))
blockListImg.extend([splitAreaTmp, splitAreaTmpSkew])
showImg(img, imgColor02, imgColor, *blockListImg)
return
# main function
def houghTest(img):
img = cv2.resize(img, (600, 700))
originalImg = img
# Increase brightness and contrast; adding highlights emphasizes the black areas and weakens the gray ones
# img = cv2.convertScaleAbs(img, alpha = 1, beta = -60)
# Grayscale image
img = grayImg(img)
imgSize = getImgSize(img)
minW = min(img.shape)
maxW = max(img.shape)
img = cv2.blur(img, (4, 4))
# Edge detection
edges0 = cv2.Canny(img, 10, 50, apertureSize = 3)
# edges = sobel(img)
# edges = cv2.blur(edges, (8, 8))
# edges = erosion(edges, iterations = 1)
edges = dilation(edges0, iterations = 2)
# Debug: overall edges
edges = cv2.Canny(edges, 10, 50, apertureSize = 3)
# Find the frame
# contours, hierarchy = findContours(edges, cv2.RETR_TREE)
# White outer contours
# blackImg = createBlackImg(img.shape)
# drawContours(blackImg, contours, (255, 255, 255), 1)
# Black outer contours of the frame
# whiteImg = createWhiteImg(imgSize)
# Debug: draw the frame
# drawContours(whiteImg, contours, (0, 0, 0), 1)
# boundingBoxes = getBoundingRect(contours)
# for boundingBox in boundingBoxes:
# cv2.rectangle(img, (boundingBox[0], boundingBox[1]), (boundingBox[2], boundingBox[3]), (0, 0, 255), 1)
# First find the contour edge lines, then the rectangle formed by their intersections
# lines = cv2.HoughLines(edges, 1, np.pi / 360, 200)
# lines = getLinesFromPolarCoord(lines, maxW)
# lines = cv2.HoughLinesP(edges, 1, np.pi / 360, 50, minLineLength = 100, maxLineGap = 20)
lines = np.array(cv2.HoughLinesP(edges, 1, np.pi / 360, 50, minLineLength = 100, maxLineGap = 50))
# lines = remainLine(lines)
# Find the mode of the line angles
mode = findMode(findAngle(lines), 7, 9)
mode[0][1][1].extend(mode[1][1][1])
lines = lines[mode[0][1][1]]
# Debug: draw the extended lines
lines0 = expandLine(lines, int(maxW))
wimg0 = createWhiteImg(getImgSize(img))
drawLines(wimg0, lines0)
# Debug: draw the edge lines
wimg = createWhiteImg(getImgSize(img))
drawLines(wimg, lines)
# Compute the intersection points
intersectPoints = getIntersectPoints(lines, imgSize)
cornerPoints = getBoundingCornerPoints(intersectPoints, imgSize)
# Fine-tune: shrink the frame inward
thresh = 190.0
threshZero = 1.0
cornerPoints[0] += minW / thresh * threshZero
cornerPoints[1][0] -= minW / thresh * threshZero
cornerPoints[1][1] += minW / thresh * threshZero
cornerPoints[2] -= minW / thresh * threshZero
cornerPoints[3][0] += minW / thresh * threshZero
cornerPoints[3][1] -= minW / thresh * threshZero
transPs = np.array([[0, 0], [imgSize[0], 0], [imgSize[0], imgSize[1]], [0, imgSize[1]]], dtype = np.float32)
transform = cv2.getPerspectiveTransform(cornerPoints, transPs)
# Debug: draw the frame
wimg2 = createWhiteImg(imgSize)
drawRectP4(wimg2, np.array(cornerPoints).astype(np.int32))
# Warp out the foreground
wimg3 = cv2.warpPerspective(src = originalImg, M = transform, dsize = imgSize)
# Find the frame from the detected lines
# contours, hierarchy = findContours(wimg, cv2.RETR_TREE)
# contours, hierarchy = findContours(blackImg, cv2.RETR_TREE)
# wimg = createWhiteImg(imgSize)
# Draw the frame
# drawContours(wimg, contours, (0, 0, 0), 1)
# Find the region with the largest area
# maxAreaContours = sorted(contours, key = cv2.contourArea, reverse = True)[0]
# Compute its minimum-area bounding rectangle
# rect = cv2.minAreaRect(maxAreaContours)
# Debug: draw the rectangle
# box = np.int0(cv2.boxPoints(rect))
# whiteImg = createWhiteImg(imgSize)
# drawContours(wimg, [maxAreaContours], (0, 0, 0), 1)
showImg(wimg0, wimg, wimg2, edges0, edges, originalImg, img, wimg3)
# main function
def ansAreaSplit(img):
img = cv2.resize(img, (600, 700))
originalImg = img
img = grayImg(img)
imgSize = getImgSize(img)
minW = min(img.shape)
maxW = max(img.shape)
img = cv2.blur(img, (4, 4))
# Edge detection
edges0 = cv2.Canny(img, 10, 50, apertureSize = 3)
edges = dilation(edges0, iterations = 2)
#edges = erosion(edges, iterations = 1)
#edges = erosion(edges, iterations = 2)
#edges = dilation(edges0, iterations = 3)
# Frame contours
contours, hierarchy = findContours(edges, cv2.RETR_TREE)
blackImg = createBlackImg(imgSize)
drawContours(blackImg, contours, (255, 255, 255), 1)
showImg(edges0, edges, blackImg)
# Select the correct filled-in boxes from the bounding boxes of the marked areas and compute their center coordinates
# hierarchy entry layout: (previous box index, next box index, first child box index, parent box index)
# An index of -1 means "none"
# Returns the center coordinates of all filled-in areas and the top-level bounding box
def findAnswerBoxCenter(rectangles, hierarchy):
hierarchy = hierarchy[0]
ansBoxCenter = []
topBoundingBox = []
for index, item in enumerate(hierarchy):
if item[3] != -1:
x1, y1, w, h = rectangles[index]
# centerX, centerY,S
ansBoxCenter.append(((x1 + x1 + w) / 2, (y1 + y1 + h) / 2, w * h))
elif rectangles[index][2] != -1:
topBoundingBox = rectangles[index]
return ansBoxCenter, topBoundingBox
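# Editor's illustrative trace (hedged; the boxes below are made up): with
# rectangles = [(0, 0, 100, 60), (10, 10, 20, 20)] and
# hierarchy = [[[-1, -1, 1, -1], [-1, -1, -1, 0]]], the second entry has a
# parent (index 0), so it contributes the mark center (20, 20) with area 400,
# while the first entry is kept as topBoundingBox.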
# Determine the marked answers from the coordinates of the filled-in areas
# ansBoxCenter: center coordinates of the filled-in areas
# questionCount: number of questions in the block (one column per question)
# answerCount: number of answer choices per question
# W, H: width and height of the answer area
# Returns a questionCount x answerCount 2-D array; 1 means marked, 0 means not marked
def determineAnswer(ansBoxCenter, questionCount, answerCount, W, H, restrictArea = True, restrictAreaThresh = 0.2):
baseX = baseY = 0
stepX = int(float(W) / questionCount)
stepY = int(float(H) / answerCount)
standardS = stepX * stepY
answerMap = np.zeros((questionCount, answerCount))
for anSenter in ansBoxCenter:
ansX, ansY, S = anSenter
# A mark must cover at least 20% of one grid cell to count
if restrictArea and S <= restrictAreaThresh * standardS:
continue
ansIndex = int((ansY - baseY) / stepY)
quesIndex = int((ansX - baseX) / stepX)
answerMap[quesIndex][ansIndex] = 1
return answerMap
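# Editor's worked sketch (hedged; the numbers are invented): for a 200x80
# answer area with 5 questions and 4 choices, stepX = 40 and stepY = 20, so a
# mark centred at (130, 55) with area 500 maps to question 130 // 40 = 3 and
# answer 55 // 20 = 2:
# >>> determineAnswer([(130, 55, 500)], 5, 4, 200, 80)[3][2]
# 1.0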
# Long strip answer area spanning the whole answer sheet, containing several question groups
# Determine the marked answers from the coordinates of the filled-in areas
# ansBoxCenter: center coordinates of the filled-in areas
# questionCount: number of questions in [!each!] question group
# answerCount: number of answer choices per question in [!each!] group
# groupCount: number of question groups
# W, H: width and height of the answer area
# spaceStep: gap between adjacent groups, defaulting to the width of one answer cell
# Returns a (questionCount * groupCount) x answerCount 2-D array; 1 means marked, 0 means not marked
def determineAnswerBar(ansBoxCenter, questionCount, answerCount, groupCount, W, H, spaceStep = 1, restrictArea = True, restrictAreaThresh = 0.2):
baseX = baseY = 0
stepX = int(float(W) / (questionCount * groupCount + groupCount - 1))
stepY = int(float(H) / answerCount)
standardS = stepX * stepY
answerMap = np.zeros((questionCount * groupCount, answerCount))
for anSenter in ansBoxCenter:
ansX, ansY, S = anSenter
# A mark must cover at least 20% of one grid cell to count
if restrictArea and S <= restrictAreaThresh * standardS:
continue
ansIndex = int((ansY - baseY) / stepY)
quesIndePre = int((ansX - baseX) / stepX)
# gapIndex: 1-based index of the question group this mark belongs to
# Each group spans questionCount columns followed by one gap column, so with
# q = questionCount, group i covers columns [(q + 1) * (i - 1), (q + 1) * i - 2].
# Solving for i with x = quesIndePre gives i = ceil((x + 1) / (q + 1)).
gapIndex = np.ceil(float((quesIndePre + 1)) / (questionCount + 1)) # or np.floor(float((quesIndePre + questionCount)) / (questionCount + 1))
quesIndex = int(quesIndePre - gapIndex + 1)
answerMap[quesIndex][ansIndex] = 1
return answerMap
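# Editor's worked example (hedged): with questionCount = 5 and groupCount = 4
# the strip is divided into 5 * 4 + 3 = 23 columns. A mark in column
# quesIndePre = 13 gives gapIndex = ceil((13 + 1) / (5 + 1)) = 3 and
# quesIndex = 13 - 3 + 1 = 11, i.e. the second question of the third group.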
# main function
def readCard(img):
questionCount = 5
answerCount = 5
# Increase brightness and contrast; adding highlights emphasizes the black areas and weakens the gray ones
# img = cv2.convertScaleAbs(img, alpha = 1.3, beta = 20)
# Crop ranges: row1, row2, col1, col2
areas = [[245, 325, 320, 480], [258, 325, 485, 615], [245, 325, 10, 615]]
area = None#areas[1]
groupCount = 1
# Grayscale image
originalImg = img
img = grayImg(img)
w, h = img.shape
imgSize = getImgSize(img)
# Otsu binarization
# img = binaryInv(img)
# Low-pass filter
# img = cv2.blur(img, (3, 3))
# Erosion; in practice this smears the filled-in marks
# img = erosion(img)
# Dilation; in practice this shrinks the filled-in marks
# img = dilation(img, iterations = 1)
# Crop
if area:
rectImg01 = img[area[0]: area[1], area[2]: area[3]]
else:
rectImg01 = img
# Corner detection
# rectImg01_32 = np.float32(rectImg01)
# rectImg01 = cv2.cornerHarris(np.float32(rectImg01_32), 2, 3, 0.04)
# rectImg01 = dilation(rectImg01)
# rectImg01 = erosion(rectImg01)
# rectImg01 = binaryThresh(rectImg01, 0.01 * rectImg01.max(), 255, 1)
# # find centroids
# rectImg01 = np.uint8(rectImg01)
# ret, labels, stats, centroids = cv2.connectedComponentsWithStats(rectImg01)
# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
# corners = np.int0(cv2.cornerSubPix(rectImg01_32, np.float32(centroids), (5, 5), (-1, -1), criteria))
# Count the filled-in areas
rectImg010 = erosion(rectImg01, iterations=3)
contours, hierarchy = findContours(rectImg010, cv2.RETR_TREE)
whiteImg00 = createWhiteImg(imgSize)
drawContours(whiteImg00, contours, (0, 0, 0), 1)
print "Count: ", len(hierarchy[0]) - 1
ansCount = len(hierarchy[0]) - 1
corners = np.int0(cv2.goodFeaturesToTrack(rectImg01, 20, 0.01, 10))
# Cluster the corner points to find their centers
cornersStack = np.hstack((corners[:, :, 0], corners[:, :, 1]))
pred = KMeans(n_clusters = ansCount).fit_predict(cornersStack)
_cornerMap = {}
for index, _corner in enumerate(cornersStack):
if pred[index] not in _cornerMap:
_cornerMap[pred[index]] = []
_cornerMap[pred[index]].append(_corner)
newCorners = []
for v in _cornerMap.values():
_sum = np.zeros(2)
c = 0
for p in v:
_sum += p
c += 1
avg = _sum / c
newCorners.append(np.int32(avg))
newCorners = np.array(newCorners)
# print corners
# originalImg[corners[:, 0, 1], corners[:, 0, 0]] = [0, 0, 255]
# rectImg01 = binaryThresh(rectImg01, 50, 255, cv2.THRESH_BINARY_INV)
# rectImg01 = np.uint8(rectImg01)
# # White border
# row, col = rectImg01.shape
# rectImg01[0] = 255
# rectImg01[row - 1] = 255
# rectImg01[:, col - 1] = 255
# rectImg01[:, 0] = 255
# #rectImg01[1] = 255
# # Debug: find contours and draw them on a white background
originalImg[newCorners[:, 1], newCorners[:, 0]] = [0, 0, 255]
# # Find the contours
# Mark the corner points
whiteImg = createWhiteImg(imgSize)
whiteImg[newCorners[:, 1], newCorners[:, 0]] = 0
contours, hierarchy = findContours(whiteImg, cv2.RETR_TREE)
# # Debug: draw the contours
drawContours(whiteImg, contours, (0, 0, 0), 1)
# Get the marked answers
boundingBox = getBoundingRect(contours)
ansBoxCenter, topBoundingBox = findAnswerBoxCenter(boundingBox, hierarchy)
# Single question group
# ansMap = determineAnswer(ansBoxCenter, 5, 4, topBoundingBox[2] - topBoundingBox[0], topBoundingBox[3] - topBoundingBox[1])
# Four question groups
# ansMap = determineAnswerBar(ansBoxCenter, 5, 5, groupCount, topBoundingBox[2] - topBoundingBox[0], topBoundingBox[3] - topBoundingBox[1]
# , restrictArea = True, restrictAreaThresh = 0.05)
ansMap = determineAnswerBar(ansBoxCenter, questionCount, answerCount, groupCount, topBoundingBox[2] - topBoundingBox[0], topBoundingBox[3] - topBoundingBox[1]
, restrictArea = True, restrictAreaThresh = 0.00)
# Debug: draw the contours
showImg(whiteImg00, rectImg01, whiteImg)
# showImg(whiteImg)
return ansMap.T
# main function
def readCard02(img, details = [], mode = "noise"):
if "area" not in details or not details["area"]:
area = None
else:
area = details["area"]
groupCount = 1 #details["groupCount"]
questionCount = 5 #details["questionCount"]
answerCount = 5 #details["answerCount"]
# Grayscale image
img = grayImg(img)
w, h = img.shape
# Otsu binarization
img = binaryInv(img)
# Low-pass filter
if mode == "noise":
img = cv2.blur(img, (3, 3))
# Erosion; in practice this smears the filled-in marks
# img = erosion(img)
# Dilation; in practice this shrinks the filled-in marks
if mode == "noise":
img = dilation(img, iterations = 1)
# Crop
if area:
rectImg01 = img[area[0]: area[1], area[2]: area[3]]
else:
rectImg01 = img
# White border to keep marks near the edge from being merged into the outer frame
row, col = rectImg01.shape
rectImg01[0] = 255
rectImg01[row - 1] = 255
rectImg01[:, col - 1] = 255
rectImg01[:, 0] = 255
# Debug: find contours and draw them on a white background
whiteImg = createWhiteImg((col, row))
# Find the contours
contours, hierarchy = findContours(rectImg01, cv2.RETR_TREE)
# Debug: draw the contours
drawContours(whiteImg, contours, (0, 0, 0), 2)
# Get the marked answers
boundingBox = getBoundingRect(contours)
ansBoxCenter, topBoundingBox = findAnswerBoxCenter(boundingBox, hierarchy)
# Single question group
# ansMap = determineAnswer(ansBoxCenter, 5, 4, topBoundingBox[2] - topBoundingBox[0], topBoundingBox[3] - topBoundingBox[1])
# Four question groups
ansMap = determineAnswerBar(ansBoxCenter, questionCount, answerCount, groupCount, topBoundingBox[2] - topBoundingBox[0], topBoundingBox[3] - topBoundingBox[1]
, restrictArea = True, restrictAreaThresh = 0.02)
# Debug: draw the contours
showImg(rectImg01, whiteImg)
return ansMap.T
if __name__ == "__main__":
img = readImg("/Volumes/SD/ML/scantron/pics/card01.jpg")
#houghTest(img)
readCard(img)
| mit |
untom/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
0x0all/scikit-learn | examples/plot_multilabel.py | 25 | 4261 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
pl.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
pl.subplot(2, 2, subplot)
pl.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
pl.scatter(X[:, 0], X[:, 1], s=40, c='gray')
pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
pl.xticks(())
pl.yticks(())
pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
pl.xlabel('First principal component')
pl.ylabel('Second principal component')
pl.legend(loc="upper left")
pl.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
pl.subplots_adjust(.04, .02, .97, .94, .09, .2)
pl.show()
| bsd-3-clause |
h2educ/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
tawsifkhan/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
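# Editor's note (hedged): macro-averaging weights every class equally; an
# alternative single-number summary is the unweighted mean of the per-class
# areas, e.g. np.mean([roc_auc[i] for i in range(n_classes)]).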
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
jaeilepp/mne-python | examples/inverse/plot_lcmv_beamformer.py | 1 | 3405 | """
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer solutions on an evoked dataset for three different
choices of source orientation and store the solutions in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation, voxel: %i', 'Normal orientation, voxel: %i',
'Max-power orientation, voxel: %i']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
# compute unit-noise-gain beamformer with whitening of the leadfield and
# data (enabled by passing a noise covariance matrix)
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.05,
pick_ori=pick_ori, weight_norm='unit-noise-gain',
max_ori_out='signed')
# View activation time-series in maximum voxel at 100 ms:
time_idx = stc.time_as_index(0.1)
max_vox = np.argmax(stc.data[:, time_idx])
plt.plot(stc.times, stc.data[max_vox, :], color, hold=True,
label=desc % max_vox)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in maximum voxel')
plt.legend()
plt.show()
# take absolute value for plotting
stc.data[:, :] = np.abs(stc.data)
# Plot last stc in the brain in 3D with PySurfer if available
brain = stc.plot(hemi='lh', subjects_dir=subjects_dir,
initial_time=0.1, time_unit='s')
brain.show_view('lateral')
| bsd-3-clause |
megamorphf/decluterfy | main.py | 1 | 4542 | import logging
import spotipy.util as util
from spotipy_client import MLearnipy
from ml_items import DatasetFormer
from sklearn import tree
username = 'coder-hermes' # KILL ME FOR HARDCODE OR USE YOUR UNAME INSTEAD
selected_features = [
"id",
"duration_ms",
"acousticness",
"tempo",
"speechiness",
"mode",
"danceability",
"liveness",
"instrumentalness",
"loudness",
"time_signature",
"energy",
"valence",
"key"
]
def fetch_token(username='coder-hermes'):
return util.prompt_for_user_token(username, scope = 'playlist-modify-public')
def predict_playlists_for_unsorted_songs(already_sorted_set, incorrectly_sorted_set):
decision_tree_classifier = tree.DecisionTreeClassifier()
songs_not_in_unsorted_playlist = already_sorted_set.data
playlists_for_sorted_songs = already_sorted_set.target
# the 3 lines of magic
decision_tree_classifier.fit(songs_not_in_unsorted_playlist, playlists_for_sorted_songs)
incorrectly_sorted_songs = incorrectly_sorted_set.data
playlists_for_unsorted_songs = decision_tree_classifier.predict(incorrectly_sorted_songs)
return playlists_for_unsorted_songs
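# Editor's illustrative sketch (hedged; the feature values and playlist ids
# below are invented, not from this project): the sets are expected to expose
# an (n_songs, n_features) matrix in .data and playlist ids in .target, so the
# three lines above behave like:
#   clf = tree.DecisionTreeClassifier()
#   clf.fit([[0.1, 120.0], [0.9, 82.0]], ["pl_rock", "pl_chill"])
#   clf.predict([[0.2, 118.0]])   # expected to yield 'pl_rock' here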
def make_track_names(item):
return '{} - {}'.format(item[0], item[1])
def user_confirms(message='\nY/n', negation_list=('n', 'N')):
prompt = input(message)
if prompt not in negation_list:
return True
else:
return False
def main():
token = fetch_token()
if token:
sp = MLearnipy(username, auth=token)
unsorted_playlist, all_playlists = sp.get_target_and_all_other_pls(selected_features)
# Form a data-frame for ml analysis
already_sorted_set = DatasetFormer(all_playlists, 'playlist_id', ['id'])
incorrectly_sorted_set = DatasetFormer(unsorted_playlist, 'playlist_id', ['id'])
# playlists_for_unsorted_songs contains ids for playlists to which unsorted songs have to be moved
# elements of playlists_for_unsorted_songs are in the same order as songs in incorrectly_sorted_set
playlists_for_unsorted_songs = predict_playlists_for_unsorted_songs(already_sorted_set, incorrectly_sorted_set)
logger.debug("predicted list: {}".format(playlists_for_unsorted_songs))
logger.debug("names: {}".format(already_sorted_set.target_names))
logger.debug("ids : {}".format(already_sorted_set.targets_as_ids))
remaped_playlist = already_sorted_set.remap_list_of_targets_to_initial_value(playlists_for_unsorted_songs)
logger.debug("Songs remapped: {}".format(remaped_playlist))
# returns destination playlist from their respective ids
pl_names = sp.last_fetch_of_all_pls
sp.print_separator(' Decluter suggestions ')
# gets all song ids
song_ids_list = incorrectly_sorted_set.popped_entries['id']
raw_track_list = sp.resolve_song_names_from_id_list(song_ids_list)
tracks = list(
map(make_track_names, raw_track_list))
for index,playlist in list(enumerate(remaped_playlist, start=0)):
# todo add song names here:
print('#{}\t{}\t->\t{}'.format(index, tracks[index], sp.find_in_list_of_tuples(pl_names, playlist, 0, 1)))
sp.print_separator(' Chose songs you want to move ')
for index, playlist in list(enumerate(remaped_playlist, start=0)):
track = tracks[index]
pl_name = sp.find_in_list_of_tuples(pl_names, playlist, 0, 1)
print('Move:\t{}\n#{}\t{} -> {}\n'.format((raw_track_list[index][2]),index, track, pl_name))
# asks if user want to move a song to a predicted playlist
if user_confirms():
print('Moving...')
# todo: add moving here
sp.user_playlist_add_tracks(username, playlist, [song_ids_list[index]])
print('Do you wish to delete the song you just moved?')
if user_confirms():
print('Deleting...')
# todo: delete songs here
sp.user_playlist_remove_all_occurrences_of_tracks(username, unsorted_playlist['playlist_id'][0], [song_ids_list[index]])
else:
pass
else:
pass
sp.print_separator('')
sp.print_separator(' Decluterfy finished ')
if __name__ == '__main__':
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
main()
| mit |
FEniCS/mshr | demo/python/csg-subdomains-2D.py | 1 | 1532 | # Copyright (C) 2012-2014 Benjamin Kehlet
#
# This file is part of mshr.
#
# mshr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mshr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mshr. If not, see <http://www.gnu.org/licenses/>.
import dolfin
from mshr import *
#dolfin.set_log_level(dolfin.TRACE)
# Define 2D geometry
domain = Rectangle(dolfin.Point(0., 0.), dolfin.Point(5., 5.)) \
- Rectangle(dolfin.Point(2., 1.25), dolfin.Point(3., 1.75)) \
- Circle(dolfin.Point(1, 4), .25) \
- Circle(dolfin.Point(4, 4), .25)
domain.set_subdomain(1, Rectangle(dolfin.Point(1., 1.), dolfin.Point(4., 3.)))
domain.set_subdomain(2, Rectangle(dolfin.Point(2., 2.), dolfin.Point(3., 4.)))
print("Verbose output of 2D geometry:")
dolfin.info(domain, True)
# Generate and plot mesh
mesh2d = generate_mesh(domain, 45)
print(mesh2d)
dolfin.plot(mesh2d, "2D mesh")
# Convert subdomains to mesh function for plotting
mf = dolfin.MeshFunction("size_t", mesh2d, 2, mesh2d.domains())
dolfin.plot(mf, "Subdomains")
# import matplotlib.pyplot as plt
# plt.show()
| gpl-3.0 |
JeyZeta/Dangerous | Dangerous/Golismero/thirdparty_libs/nltk/probability.py | 12 | 81647 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (additions)
# Trevor Cohn <[email protected]> (additions)
# Peter Ljunglöf <[email protected]> (additions)
# Liang Dong <[email protected]> (additions)
# Geoffrey Sampson <[email protected]> (additions)
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
_NINF = float('-1e300')
import math
import random
import warnings
from operator import itemgetter
from itertools import imap, islice
from collections import defaultdict
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
# [SB] inherit from defaultdict?
# [SB] for NLTK 3.0, inherit from collections.Counter?
class FreqDist(dict):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist.inc(word.lower())
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
dict.__init__(self)
self._N = 0
self._reset_caches()
if samples:
self.update(samples)
def inc(self, sample, count=1):
"""
Increment this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any
:param count: The amount to increment the sample's count by.
:type count: int
:rtype: None
:raise NotImplementedError: If ``sample`` is not a
supported sample type.
"""
if count == 0: return
self[sample] = self.get(sample,0) + count
def __setitem__(self, sample, value):
"""
Set this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any hashable object
:param count: The new value for the sample's count
:type count: int
:rtype: None
:raise TypeError: If ``sample`` is not a supported sample type.
"""
self._N += (value - self.get(sample, 0))
dict.__setitem__(self, sample, value)
# Invalidate the caches
self._reset_caches()
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return self._N
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def samples(self):
"""
Return a list of all samples that have been recorded as
outcomes by this frequency distribution. Use ``fd[sample]``
to determine the count for each sample.
:rtype: list
"""
return self.keys()
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
"""
Return the number of samples with count r.
:type r: int
:param r: A sample count.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: int
"""
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
def _cache_Nr_values(self):
Nr = [0]
for sample in self:
c = self.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def _cumulative_frequencies(self, samples=None):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type sample: any
:rtype: list(float)
"""
cf = 0.0
if not samples:
samples = self.keys()
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self._N is 0:
return 0
return float(self[sample]) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if self._max_cache is None:
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
self._max_cache = max([(a,b) for (b,a) in self.items()])[1]
return self._max_cache
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type title: bool
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab). '
'See http://matplotlib.sourceforge.net/')
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution (cumulative),
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print "%4s" % str(samples[i]),
print
for i in range(len(samples)):
print "%4d" % freqs[i],
print
def _sort_keys_by_value(self):
if not self._item_cache:
self._item_cache = sorted(dict.items(self), key=lambda x:(-x[1], x[0]))
def keys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(0), self._item_cache)
def values(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(1), self._item_cache)
def items(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: list(tuple)
"""
self._sort_keys_by_value()
return self._item_cache[:]
def __iter__(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def iterkeys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def itervalues(self):
"""
Return the values sorted in decreasing order.
:rtype: iter
"""
return iter(self.values())
def iteritems(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: iter of any
"""
self._sort_keys_by_value()
return iter(self._item_cache)
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def update(self, samples):
"""
Update the frequency distribution with the provided list of samples.
This is a faster way to add multiple samples to the distribution.
:param samples: The samples to add.
:type samples: list
"""
try:
sample_iter = samples.iteritems()
except:
sample_iter = imap(lambda x: (x,1), samples)
for sample, count in sample_iter:
self.inc(sample, count=count)
def pop(self, other):
self._N -= 1
self._reset_caches()
return dict.pop(self, other)
def popitem(self):
self._N -= 1
self._reset_caches()
return dict.popitem(self)
def clear(self):
self._N = 0
self._reset_caches()
dict.clear(self)
def _reset_caches(self):
self._Nr_cache = None
self._max_cache = None
self._item_cache = None
def __add__(self, other):
clone = self.copy()
clone.update(other)
return clone
def __le__(self, other):
if not isinstance(other, FreqDist): return False
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
def __lt__(self, other):
if not isinstance(other, FreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, FreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, FreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
items = ['%r: %r' % (s, self[s]) for s in self.keys()[:10]]
if len(self) > 10:
items.append('...')
return '<FreqDist: %s>' % ', '.join(items)
def __getitem__(self, sample):
return self.get(sample, 0)
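# Editor's illustrative sketch (hedged):
# >>> fd = FreqDist('abracadabra')
# >>> fd['a'], fd.N(), fd.B()
# (5, 11, 5)
# >>> fd.freq('a') == 5.0 / 11
# True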
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return math.log(p, 2)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, 1-p))
return random.choice(list(self.samples()))
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
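# Editor's illustrative sketch (hedged): duplicates are collapsed into a set,
# so UniformProbDist(['a', 'b', 'c', 'c']) gives prob('a') == 1.0 / 3 and
# prob('d') == 0 for any sample outside the set.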
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
if prob_dict is None:
self._prob_dict = {}
else:
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return 2**(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
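# Editor's illustrative sketch (hedged): with normalize=True the values are
# rescaled to sum to one, e.g. DictionaryProbDist({'a': 2, 'b': 1, 'c': 1},
# normalize=True).prob('a') == 0.5, while any unseen sample gets probability 0.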
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
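# Editor's illustrative sketch (hedged): MLEProbDist(FreqDist('aab')) simply
# reuses the observed relative frequencies, so prob('a') == 2.0 / 3 and
# prob('b') == 1.0 / 3.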
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
        :param gamma: A real number used to parameterize the
            estimate.  The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
                             'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
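# Illustrative sketch (not part of the original module), assuming FreqDist
# counts the items it is given.  With counts a:2, b:1 (N=3), gamma=0.1 and
# bins=3, the divisor is N + bins*gamma = 3.3, so the three bins receive
# 2.1/3.3, 1.1/3.3 and 0.1/3.3, which sum to one.
#
#     >>> fd = FreqDist(['a', 'a', 'b'])
#     >>> lid = LidstoneProbDist(fd, 0.1, bins=3)
#     >>> lid.prob('a')            # (2 + 0.1) / 3.3 ~= 0.636
#     >>> lid.prob('c')            # (0 + 0.1) / 3.3 ~= 0.030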
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
    *(c+1)/(N+B)*.  This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
    *B* bins as *(c+0.5)/(N+B/2)*.  This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
        distribution.  In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
        In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([fd.keys() for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalising factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
if c == 0:
return self._P0
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
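# Illustrative sketch (not part of the original module), assuming FreqDist
# counts the items it is given.  With counts a:2, b:1 there are T=2 seen
# types and N=3 outcomes; bins=4 leaves Z=2 unseen bins, each receiving
# T/(Z*(N+T)) = 2/10 = 0.2, while seen samples keep c/(N+T).
#
#     >>> fd = FreqDist(['a', 'a', 'b'])
#     >>> wb = WittenBellProbDist(fd, bins=4)
#     >>> wb.prob('a')             # 2/5 = 0.4
#     >>> wb.prob('b')             # 1/5 = 0.2
#     >>> wb.prob('c')             # 0.2 for each of the two unseen bins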
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# World War II.  It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls (finite
# but unknown in number).)
#
# The situation of frequency zero is quite common in the original
# Good-Turing estimation. Bill Gale and Geoffrey Sampson present a
# simple and effective approach, Simple Good-Turing. As a smoothing
# curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
#       relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use the Turing estimate r (computed from the
# measured Nr) while its difference from the smoothed estimate r* is greater
# than 1.96 times the standard deviation, and to switch to r* once the
# difference is less than or equal to that threshold:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion,
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count *c\**:
- *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
- *things with frequency zero in training* = N(1) for c == 0
where *c* is the original count, *N(i)* is the number of event types
    observed with count *i*.  We can think of the count of the unseen as the
    count of frequency one (see Jurafsky & Martin 2nd Edition, p101).
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
count = self._freqdist[sample]
# unseen sample's frequency (count zero) uses frequency one's
if count == 0 and self._freqdist.N() != 0:
p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._bins == self._freqdist.B():
p0 = 0.0
else:
p0 = p0 / (1.0 * self._bins - self._freqdist.B())
nc = self._freqdist.Nr(count)
ncn = self._freqdist.Nr(count + 1)
# avoid divide-by-zero errors for sparse datasets
if nc == 0 or self._freqdist.N() == 0:
return 0
return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
"""
:return: The probability mass transferred from the
seen samples to the unseen samples.
:rtype: float
"""
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
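# Illustrative sketch (not part of the original module), assuming FreqDist
# counts the items it is given.  With counts a:2, b:1, c:1, d:1 we have N=5,
# Nr(1)=3 and Nr(2)=1, so a sample seen once gets (1+1)*Nr(2)/(Nr(1)*N) = 2/15,
# 'a' gets 0 because Nr(3)=0 (the estimate degrades on tiny, sparse data), and
# the unseen mass Nr(1)/N = 0.6 is split over the bins-B = 2 unseen bins.
#
#     >>> fd = FreqDist(['a', 'a', 'b', 'c', 'd'])
#     >>> gt = GoodTuringProbDist(fd, bins=6)
#     >>> gt.prob('b')             # 2*1 / (3*5) ~= 0.133
#     >>> gt.prob('a')             # 0.0, since no sample occurs 3 times
#     >>> gt.prob('e')             # 0.6 / 2 = 0.3 for each unseen bin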
##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
class SimpleGoodTuringProbDist(ProbDistI):
"""
    SimpleGoodTuring ProbDist approximates the relationship between the
    frequency and the frequency of frequency by a straight line in log
    space, fitted by linear regression.
Details of Simple Good-Turing algorithm can be found in:
    - "Good Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
    Given a set of pairs (xi, yi), where xi denotes the frequency and
    yi denotes the frequency of frequency, we want to minimize the
    squared deviation.  E(x) and E(y) represent the means of xi and yi.
    - slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b.E(x)
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist``.B() + 1
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'Bins parameter must not be less than freqdist.B() + 1'
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr(self):
"""
        Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
r, nr = [], []
b, i = 0, 0
while b != self._freqdist.B():
nr_i = self._freqdist.Nr(i)
if nr_i > 0:
b += nr_i
r.append(i)
nr.append(nr_i)
i += 1
return (r, nr)
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
        # For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
if j > 0:
i = r[j-1]
else:
i = 0
if j != len(r) - 1:
k = r[j+1]
else:
k = 2 * r[j] - i
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
if x_var != 0:
self._slope = xy_cov / x_var
else:
self._slope = 0.0
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
        Return the smoothed estimate of the number of samples with count r.
        :param r: The frequency (count) value.
        :type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
        # Sanity check: the mass reserved for unseen samples plus the
        # renormalized mass over all seen samples should sum to one.
        r, nr = self._r_Nr()
        prob_sum = self._prob_measure(0)
        for r_, nr_ in zip(r, nr):
            prob_sum += nr_ * self._prob_measure(r_) * self._renormal
        print "Probability Sum:", prob_sum
        #assert abs(prob_sum - 1.0) < 1e-6, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
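# Illustrative sketch (not part of the original module): typical usage on a
# reasonably large corpus, where the log-log regression over (r, Nr) is
# meaningful.  The corpus_words variable below is hypothetical.
#
#     >>> fd = FreqDist(corpus_words)
#     >>> sgt = SimpleGoodTuringProbDist(fd)
#     >>> sgt.prob('the')          # smoothed estimate for a seen word
#     >>> sgt.prob('zyzzyva')      # small non-zero mass if unseen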
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
    simply copies an existing probdist, storing the probability values in a
    mutable numpy array and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
try:
import numpy
except ImportError:
print "Error: Please install numpy; for instructions see http://www.nltk.org/"
exit()
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return 2**(self._data[i])
else:
return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return self._data[i]
else:
return math.log(self._data[i], 2)
else:
return float('-inf')
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
        to stop being a valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: is the probability already logged
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
if log: self._data[i] = prob
else: self._data[i] = math.log(prob, 2)
else:
if log: self._data[i] = 2**(prob)
else: self._data[i] = prob
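# Illustrative sketch (not part of the original module), assuming FreqDist
# counts the items it is given: copy an MLE estimate into a MutableProbDist
# and then overwrite its probabilities in place.
#
#     >>> mpd = MutableProbDist(MLEProbDist(FreqDist(['a', 'a', 'b'])), ['a', 'b'])
#     >>> mpd.prob('a')            # ~0.667, copied from the MLE estimate
#     >>> mpd.update('a', 0.5, log=False)
#     >>> mpd.update('b', 0.5, log=False)
#     >>> mpd.prob('a')            # 0.5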
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
               for s in actual_pdist.samples())
def entropy(pdist):
probs = [pdist.prob(s) for s in pdist.samples()]
return -sum([p * math.log(p,2) for p in probs])
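# Illustrative sketch (not part of the original module): the entropy of a
# uniform two-way distribution is exactly one bit.
#
#     >>> entropy(DictionaryProbDist({'H': 0.5, 'T': 0.5}))   # 1.0
#     >>> entropy(DictionaryProbDist({'H': 1.0}))             # 0.0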
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition].inc(word)
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 6 outcomes>
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond].inc(sample)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return sorted(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in self.itervalues())
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
import pylab
except ImportError:
            raise ValueError('The plot function requires the matplotlib package (aka pylab). '
                             'See http://matplotlib.sourceforge.net/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = str(condition)
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len(str(c)) for c in conditions)
print ' ' * condition_size,
for s in samples:
print "%4s" % str(s),
print
for c in conditions:
print "%*s" % (condition_size, str(c)),
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print "%4d" % f,
print
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
class ConditionalProbDistI(defaultdict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return self.keys()
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> print cpdist['run'].max()
'NN'
>>> print cpdist['run'].prob('NN')
0.0813
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
# self._probdist_factory = probdist_factory
# self._cfdist = cfdist
# self._factory_args = factory_args
# self._factory_kw_args = factory_kw_args
factory = lambda: probdist_factory(FreqDist(),
*factory_args, **factory_kw_args)
defaultdict.__init__(self, factory)
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
defaultdict.__init__(self, DictionaryProbDist)
self.update(probdist_dict)
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
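# Illustrative sketch (not part of the original module): adding two equal
# log-base-2 probabilities.  Both inputs are log2(0.25) = -2, and
# add_logs(-2, -2) returns -2 + log2(1 + 1) = -1, i.e. log2(0.5).
#
#     >>> add_logs(-2.0, -2.0)     # -1.0
#     >>> sum_logs([-2.0, -2.0])   # -1.0, the same sum via reduce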
def sum_logs(logs):
if len(logs) == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return reduce(add_logs, logs[1:], logs[0])
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
def set_logprob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
    distributions by sampling a random process with
``numsamples`` samples. Each frequency distribution is sampled
``numoutcomes`` times. These three frequency distributions are
then used to build six probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
        frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
GoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print FORMATSTR % tuple(`pdist`[1:9] for pdist in pdists[:-1])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
if len(`str(fdist1)`) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
print 'Generating:'
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print '%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55])
print
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
gt = GoodTuringProbDist(fd)
sgt = SimpleGoodTuringProbDist(fd)
katz = SimpleGoodTuringProbDist(fd, 7)
print '%18s %8s %12s %14s %12s' \
% ("word", "freqency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" )
for key in fd:
print '%18s %8d %12e %14e %12e' \
% (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| mit |
walterreade/scikit-learn | sklearn/feature_selection/tests/test_base.py | 143 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
andybrnr/QuantEcon.py | quantecon/estspec.py | 7 | 4856 | """
Filename: estspec.py
Authors: Thomas Sargent, John Stachurski
Functions for working with periodograms of scalar data.
"""
from __future__ import division, print_function
import numpy as np
from numpy.fft import fft
from pandas import ols, Series
def smooth(x, window_len=7, window='hanning'):
"""
Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction.
"""
if len(x) < window_len:
raise ValueError("Input vector length must be >= window length.")
if window_len < 3:
raise ValueError("Window length must be at least 3.")
if not window_len % 2: # window_len is even
window_len += 1
print("Window length reset to {}".format(window_len))
windows = {'hanning': np.hanning,
'hamming': np.hamming,
'bartlett': np.bartlett,
'blackman': np.blackman,
'flat': np.ones # moving average
}
# === Reflect x around x[0] and x[-1] prior to convolution === #
k = int(window_len / 2)
xb = x[:k] # First k elements
xt = x[-k:] # Last k elements
s = np.concatenate((xb[::-1], x, xt[::-1]))
# === Select window values === #
if window in windows.keys():
w = windows[window](window_len)
else:
msg = "Unrecognized window type '{}'".format(window)
print(msg + " Defaulting to hanning")
w = windows['hanning'](window_len)
return np.convolve(w / w.sum(), s, mode='valid')
def periodogram(x, window=None, window_len=7):
"""
Computes the periodogram
.. math::
I(w) = (1 / n) | sum_{t=0}^{n-1} x_t e^{itw} |^2
    at the Fourier frequencies w_j := 2 pi j / n, j = 0, ..., n - 1,
    using the fast Fourier transform.  Only the frequencies w_j in [0,
pi] and corresponding values I(w_j) are returned. If a window type
is given then smoothing is performed.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional(default=7)
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
w : array_like(float)
        Fourier frequencies at which the periodogram is evaluated
    I_w : array_like(float)
        Values of the periodogram at the Fourier frequencies
"""
n = len(x)
I_w = np.abs(fft(x))**2 / n
w = 2 * np.pi * np.arange(n) / n # Fourier frequencies
w, I_w = w[:int(n/2)+1], I_w[:int(n/2)+1] # Take only values on [0, pi]
if window:
I_w = smooth(I_w, window_len=window_len, window=window)
return w, I_w
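# Illustrative sketch (not part of the original module): a smoothed
# periodogram of Gaussian white noise.  The returned frequencies lie on
# [0, pi] and there are n/2 + 1 of them; smoothing preserves the length.
#
#     >>> x = np.random.randn(400)
#     >>> w, I_w = periodogram(x, window='hanning', window_len=7)
#     >>> w[0], w[-1]              # 0.0 and pi
#     >>> len(w) == len(I_w) == 201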
def ar_periodogram(x, window='hanning', window_len=7):
"""
Compute periodogram from data x, using prewhitening, smoothing and
recoloring. The data is fitted to an AR(1) model for prewhitening,
and the residuals are used to compute a first-pass periodogram with
smoothing. The fitted coefficients are then used for recoloring.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
w : array_like(float)
        Fourier frequencies at which the periodogram is evaluated
    I_w : array_like(float)
        Values of the periodogram at the Fourier frequencies
"""
# === run regression === #
x_current, x_lagged = x[1:], x[:-1] # x_t and x_{t-1}
x_current, x_lagged = Series(x_current), Series(x_lagged) # pandas series
results = ols(y=x_current, x=x_lagged, intercept=True, nw_lags=1)
e_hat = results.resid.values
phi = results.beta['x']
# === compute periodogram on residuals === #
w, I_w = periodogram(e_hat, window=window, window_len=window_len)
# === recolor and return === #
I_w = I_w / np.abs(1 - phi * np.exp(1j * w))**2
return w, I_w
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/sandbox/infotheo.py | 33 | 16417 | """
Information Theoretic and Entropy Measures
References
----------
Golan, As. 2008. "Information and Entropy Econometrics -- A Review and
Synthesis." Foundations And Trends in Econometrics 2(1-2), 1-145.
Golan, A., Judge, G., and Miller, D. 1996. Maximum Entropy Econometrics.
Wiley & Sons, Chichester.
"""
#For MillerMadow correction
#Miller, G. 1955. Note on the bias of information estimates. Info. Theory
# Psychol. Prob. Methods II-B:95-100.
#For ChaoShen method
#Chao, A., and T.-J. Shen. 2003. Nonparametric estimation of Shannon's index of diversity when
#there are unseen species in sample. Environ. Ecol. Stat. 10:429-443.
#Good, I. J. 1953. The population frequencies of species and the estimation of population parameters.
#Biometrika 40:237-264.
#Horvitz, D.G., and D. J. Thompson. 1952. A generalization of sampling without replacement from a finute universe. J. Am. Stat. Assoc. 47:663-685.
#For NSB method
#Nemenman, I., F. Shafee, and W. Bialek. 2002. Entropy and inference, revisited. In: Dietterich, T.,
#S. Becker, Z. Gharamani, eds. Advances in Neural Information Processing Systems 14: 471-478.
#Cambridge (Massachusetts): MIT Press.
#For shrinkage method
#Dougherty, J., Kohavi, R., and Sahami, M. (1995). Supervised and unsupervised discretization of
#continuous features. In International Conference on Machine Learning.
#Yang, Y. and Webb, G. I. (2003). Discretization for naive-bayes learning: managing discretization
#bias and variance. Technical Report 2003/131 School of Computer Science and Software Engineer-
#ing, Monash University.
from statsmodels.compat.python import range, lzip, lmap
from scipy import stats
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import logsumexp as sp_logsumexp
#TODO: change these to use maxentutils so that over/underflow is handled
#with the logsumexp.
def logsumexp(a, axis=None):
"""
Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a
Avoids numerical overflow.
Parameters
----------
a : array-like
The vector to exponentiate and sum
axis : int, optional
The axis along which to apply the operation. Defaults is None.
Returns
-------
sum(log(exp(a)))
Notes
-----
This function was taken from the mailing list
http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html
This should be superceded by the ufunc when it is finished.
"""
if axis is None:
# Use the scipy.maxentropy version.
return sp_logsumexp(a)
a = np.asarray(a)
shp = list(a.shape)
shp[axis] = 1
a_max = a.max(axis=axis)
s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis))
lse = a_max + s
return lse
def _isproperdist(X):
"""
Checks to see if `X` is a proper probability distribution
"""
X = np.asarray(X)
if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):
return False
else:
return True
def discretize(X, method="ef", nbins=None):
"""
Discretize `X`
Parameters
----------
    X : array-like
        The data to be discretized.
    method : str, optional
        "ef" is equal-frequency binning (the default)
        "ew" is equal-width binning
    nbins : int, optional
        Number of bins.  Default is floor(sqrt(nobs)).
Examples
--------
"""
nobs = len(X)
    if nbins is None:
nbins = np.floor(np.sqrt(nobs))
if method == "ef":
discrete = np.ceil(nbins * stats.rankdata(X)/nobs)
if method == "ew":
width = np.max(X) - np.min(X)
width = np.floor(width/nbins)
svec, ivec = stats.fastsort(X)
discrete = np.zeros(nobs)
binnum = 1
base = svec[0]
discrete[ivec[0]] = binnum
for i in range(1,nobs):
if svec[i] < base + width:
discrete[ivec[i]] = binnum
else:
base = svec[i]
binnum += 1
discrete[ivec[i]] = binnum
return discrete
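# Illustrative usage sketch (assumed data, not part of the original module
# API): equal-frequency binning puts roughly the same number of observations
# in each bin.
def _example_discretize():
    X = np.array([1.0, 2.0, 3.0, 4.0])
    return discretize(X, method="ef", nbins=2)   # array([1., 1., 2., 2.])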
#TODO: looks okay but needs more robust tests for corner cases
def logbasechange(a,b):
"""
There is a one-to-one transformation of the entropy value from
a log base b to a log base a :
H_{b}(X)=log_{b}(a)[H_{a}(X)]
Returns
-------
log_{b}(a)
"""
return np.log(b)/np.log(a)
def natstobits(X):
"""
Converts from nats to bits
"""
return logbasechange(np.e, 2) * X
def bitstonats(X):
"""
Converts from bits to nats
"""
return logbasechange(2, np.e) * X
#TODO: make this entropy, and then have different measures as
#a method
def shannonentropy(px, logbase=2):
"""
This is Shannon's entropy
Parameters
-----------
    logbase : int or np.e, optional
The base of the log
px : 1d or 2d array_like
Can be a discrete probability distribution, a 2d joint distribution,
or a sequence of probabilities.
Returns
    -------
    For log base 2 (bits) given a discrete distribution
        H(p) = sum(px * log2(1/px)) = -sum(px * log2(px)) = E[log2(1/p(X))]
    For log base 2 (bits) given a joint distribution
        H(px,py) = -sum_{k,j} w_{kj} log2(w_{kj})
Notes
-----
shannonentropy(0) is defined as 0
"""
#TODO: haven't defined the px,py case?
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
entropy = -np.sum(np.nan_to_num(px*np.log2(px)))
if logbase != 2:
return logbasechange(2,logbase) * entropy
else:
return entropy
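# Illustrative sketch (assumed values, not part of the original module API):
# a fair coin carries exactly one bit of entropy, a fair die log2(6) bits.
def _example_shannonentropy():
    coin = shannonentropy([0.5, 0.5])     # 1.0
    die = shannonentropy([1 / 6.] * 6)    # ~2.585
    return coin, die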
# Shannon's information content
def shannoninfo(px, logbase=2):
"""
Shannon's information
Parameters
----------
    px : float or array-like
        A probability, or an array of probabilities, of an event
Returns
-------
    For logbase = 2
        -np.log2(px)
"""
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
if logbase != 2:
return - logbasechange(2,logbase) * np.log2(px)
else:
return - np.log2(px)
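# Illustrative sketch (assumed values, not part of the original module API):
# an event with probability 1/4 carries -log2(1/4) = 2 bits of surprisal.
def _example_shannoninfo():
    return shannoninfo(0.25)   # 2.0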
def condentropy(px, py, pxpy=None, logbase=2):
"""
Return the conditional entropy of X given Y.
Parameters
----------
px : array-like
py : array-like
pxpy : array-like, optional
If pxpy is None, the distributions are assumed to be independent
        and condentropy(px,py) = shannonentropy(px)
logbase : int or np.e
Returns
-------
    sum_{kj} w_{kj} log2(q_{j}/w_{kj})
    where q_{j} = Y[j]
    and w_{kj} = X[k,j]
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy)))
if logbase == 2:
return condent
else:
return logbasechange(2, logbase) * condent
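# Illustrative sketch (assumed toy distributions, not part of the original
# module API): when X and Y are independent and Y is uniform, knowing Y tells
# us nothing about X, so H(X|Y) equals H(X).
def _example_condentropy():
    px = np.array([0.25, 0.75])
    py = np.array([0.5, 0.5])
    return condentropy(px, py), shannonentropy(px)   # both ~0.811 bits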
def mutualinfo(px,py,pxpy, logbase=2):
"""
Returns the mutual information between X and Y.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like
The joint probability distribution of random variables X and Y.
Note that if X and Y are independent then the mutual information
is zero.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
shannonentropy(px) - condentropy(px,py,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
return shannonentropy(px, logbase=logbase) - condentropy(px,py,pxpy,
logbase=logbase)
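# Illustrative sketch (assumed toy distributions, not part of the original
# module API): two perfectly dependent fair coins share exactly one bit of
# information.
def _example_mutualinfo():
    px = np.array([0.5, 0.5])
    py = np.array([0.5, 0.5])
    pxpy = [[0.5, 0.0], [0.0, 0.5]]   # X always equals Y
    return mutualinfo(px, py, pxpy)   # 1.0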
def corrent(px,py,pxpy,logbase=2):
"""
An information theoretic correlation measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)
Notes
-----
This is also equivalent to
    corrent(px,py,pxpy) = 1 - condentropy(py,px,pxpy)/shannonentropy(py)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
return mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,
logbase=logbase)
def covent(px,py,pxpy,logbase=2):
"""
An information theoretic covariance measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
    condentropy(px,py,pxpy,logbase=logbase) + condentropy(py,px,pxpy,
        logbase=logbase)
    Notes
    -----
    This is also equivalent to
    covent(px,py,pxpy) = condentropy(px,py,pxpy) + condentropy(py,px,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
    return (condentropy(px, py, pxpy, logbase=logbase) +
            condentropy(py, px, pxpy, logbase=logbase))
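# Illustrative sketch (assumed toy distributions, not part of the original
# module API): for two perfectly dependent fair coins the normalized
# correlation measure is 1 and the entropy-covariance measure is 0.
def _example_corrent_covent():
    px = np.array([0.5, 0.5])
    py = np.array([0.5, 0.5])
    pxpy = [[0.5, 0.0], [0.0, 0.5]]
    return corrent(px, py, pxpy), covent(px, py, pxpy)   # (1.0, 0.0)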
#### Generalized Entropies ####
def renyientropy(px,alpha=1,logbase=2,measure='R'):
"""
Renyi's generalized entropy
Parameters
----------
px : array-like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned.
"""
#TODO:finish returns
#TODO:add checks for measure
if not _isproperdist(px):
raise ValueError("px is not a proper probability distribution")
alpha = float(alpha)
if alpha == 1:
genent = shannonentropy(px)
if logbase != 2:
return logbasechange(2, logbase) * genent
return genent
    elif 'inf' in str(alpha).lower() or alpha == np.inf:
return -np.log(np.max(px))
# gets here if alpha != (1 or inf)
px = px**alpha
genent = np.log(px.sum())
if logbase == 2:
return 1/(1-alpha) * genent
else:
return 1/(1-alpha) * logbasechange(2, logbase) * genent
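# Illustrative sketch (assumed values, not part of the original module API):
# for alpha = 1 Renyi's entropy reduces to Shannon's entropy.
def _example_renyientropy():
    px = [0.25, 0.25, 0.25, 0.25]
    return renyientropy(px, alpha=1), shannonentropy(px)   # both 2.0 bits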
#TODO: before completing this, need to rethink the organization of
# (relative) entropy measures, ie., all put into one function
# and have kwdargs, etc.?
def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'):
"""
Generalized cross-entropy measures.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
measure : str, optional
The measure is the type of generalized cross-entropy desired. 'T' is
the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read
measure.
"""
if __name__ == "__main__":
print("From Golan (2008) \"Information and Entropy Econometrics -- A Review \
and Synthesis")
print("Table 3.1")
# Examples from Golan (2008)
X = [.2,.2,.2,.2,.2]
Y = [.322,.072,.511,.091,.004]
for i in X:
print(shannoninfo(i))
for i in Y:
print(shannoninfo(i))
print(shannonentropy(X))
print(shannonentropy(Y))
p = [1e-5,1e-4,.001,.01,.1,.15,.2,.25,.3,.35,.4,.45,.5]
plt.subplot(111)
plt.ylabel("Information")
plt.xlabel("Probability")
x = np.linspace(0,1,100001)
plt.plot(x, shannoninfo(x))
# plt.show()
plt.subplot(111)
plt.ylabel("Entropy")
plt.xlabel("Probability")
x = np.linspace(0,1,101)
plt.plot(x, lmap(shannonentropy, lzip(x,1-x)))
# plt.show()
# define a joint probability distribution
# from Golan (2008) table 3.3
w = np.array([[0,0,1./3],[1/9.,1/9.,1/9.],[1/18.,1/9.,1/6.]])
# table 3.4
px = w.sum(0)
py = w.sum(1)
H_X = shannonentropy(px)
H_Y = shannonentropy(py)
H_XY = shannonentropy(w)
H_XgivenY = condentropy(px,py,w)
H_YgivenX = condentropy(py,px,w)
# note that cross-entropy is not a distance measure as the following shows
D_YX = logbasechange(2,np.e)*stats.entropy(px, py)
D_XY = logbasechange(2,np.e)*stats.entropy(py, px)
I_XY = mutualinfo(px,py,w)
print("Table 3.3")
print(H_X,H_Y, H_XY, H_XgivenY, H_YgivenX, D_YX, D_XY, I_XY)
print("discretize functions")
X=np.array([21.2,44.5,31.0,19.5,40.6,38.7,11.1,15.8,31.9,25.8,20.2,14.2,
24.0,21.0,11.3,18.0,16.3,22.2,7.8,27.8,16.3,35.1,14.9,17.1,28.2,16.4,
16.5,46.0,9.5,18.8,32.1,26.1,16.1,7.3,21.4,20.0,29.3,14.9,8.3,22.5,
12.8,26.9,25.5,22.9,11.2,20.7,26.2,9.3,10.8,15.6])
discX = discretize(X)
#CF: R's infotheo
#TODO: compare to pyentropy quantize?
print
print("Example in section 3.6 of Golan, using table 3.3")
print("Bounding errors using Fano's inequality")
print("H(P_{e}) + P_{e}log(K-1) >= H(X|Y)")
print("or, a weaker inequality")
print("P_{e} >= [H(X|Y) - 1]/log(K)")
print("P(x) = %s" % px)
print("X = 3 has the highest probability, so this is the estimate Xhat")
pe = 1 - px[2]
print("The probability of error Pe is 1 - p(X=3) = %0.4g" % pe)
H_pe = shannonentropy([pe,1-pe])
print("H(Pe) = %0.4g and K=3" % H_pe)
print("H(Pe) + Pe*log(K-1) = %0.4g >= H(X|Y) = %0.4g" % \
(H_pe+pe*np.log2(2), H_XgivenY))
print("or using the weaker inequality")
print("Pe = %0.4g >= [H(X) - 1]/log(K) = %0.4g" % (pe, (H_X - 1)/np.log2(3)))
print("Consider now, table 3.5, where there is additional information")
print("The conditional probabilities of P(X|Y=y) are ")
w2 = np.array([[0.,0.,1.],[1/3.,1/3.,1/3.],[1/6.,1/3.,1/2.]])
print(w2)
# not a proper distribution?
print("The probability of error given this information is")
print("Pe = [H(X|Y) -1]/log(K) = %0.4g" % ((np.mean([0,shannonentropy(w2[1]),shannonentropy(w2[2])])-1)/np.log2(3)))
print("such that more information lowers the error")
### Stochastic processes
markovchain = np.array([[.553,.284,.163],[.465,.312,.223],[.420,.322,.258]])
| bsd-3-clause |
DakotaNelson/SoftwareSystems | lecture14/thinkplot.py | 88 | 12565 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
return cls.color_iter
def PrePlot(num=None, rows=1, cols=1):
"""Takes hints about what's coming.
num: number of lines that will be plotted
"""
if num:
Brewer.InitializeIter(num)
# TODO: get sharey and sharex working. probably means switching
# to subplots instead of subplot.
# also, get rid of the gray background.
if rows > 1 or cols > 1:
pyplot.subplots(rows, cols, sharey=True)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(rows, cols, plot_number):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
"""A list that returns the same value for all indices."""
def __init__(self, val):
"""Initializes the list.
val: value to be stored
"""
list.__init__(self)
self.val = val
def __getitem__(self, index):
"""Gets the item with the given index.
index: int
returns: the stored value
"""
return self.val
def Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.iteritems():
d.setdefault(key, val)
return d
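# Illustrative usage sketch (assumed values, not part of the original API):
# Underride fills in defaults without overriding options the caller set.
def _example_underride():
    opts = Underride(dict(color='red'), color='blue', linewidth=2)
    return opts   # {'color': 'red', 'linewidth': 2}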
def Clf():
"""Clears the figure and any hints that have been set."""
Brewer.ClearIter()
pyplot.clf()
def Figure(**options):
"""Sets options for the current figure."""
Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def Plot(xs, ys, style='', **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
color_iter = Brewer.GetIter()
if color_iter:
try:
options = Underride(options, color=color_iter.next())
except StopIteration:
print 'Warning: Brewer ran out of colors.'
Brewer.ClearIter()
options = Underride(options, linewidth=3, alpha=0.8)
pyplot.plot(xs, ys, style, **options)
def Scatter(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
pyplot.scatter(xs, ys, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ps = pmf.Render()
if pmf.name:
options = Underride(options, label=pmf.name)
Plot(xs, ps, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, fs = hist.Render()
width = min(Diff(xs))
if hist.name:
options = Underride(options, label=hist.name)
options = Underride(options,
align='center',
linewidth=0,
width=width)
pyplot.bar(xs, fs, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
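# Illustrative sketch (assumed values, not part of the original API):
# Diff returns the gaps between consecutive values.
def _example_diff():
    return Diff([1, 4, 9, 16])   # [3, 5, 7]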
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs.pop()
ps.pop()
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
xs.pop(0)
ps.pop(0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
if cdf.name:
options = Underride(options, label=cdf.name)
Plot(xs, ps, **options)
return scale
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.iterkeys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
loc = options.get('loc', 0)
legend = options.get('legend', True)
if legend:
pyplot.legend(loc=loc)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
# TODO: figure out how to show more than one plot
Config(**options)
pyplot.show()
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
if root:
for fmt in formats:
SaveFormat(root, fmt)
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print 'Writing', filename
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = Brewer.ColorGenerator(7)
for color in color_iter:
print color
if __name__ == '__main__':
main()
| gpl-3.0 |
harisbal/pandas | pandas/tests/indexes/datetimes/test_indexing.py | 1 | 25831 | from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
import pandas.compat as compat
import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, CDay
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestGetItem(object):
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_dti_business_getitem(self):
rng = pd.bdate_range(START, END)
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5])
tm.assert_index_equal(smaller, exp)
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == BDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
def test_dti_business_getitem_matplotlib_hackaround(self):
rng = pd.bdate_range(START, END)
values = rng[:, None]
expected = rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_dti_custom_getitem(self):
rng = pd.bdate_range(START, END, freq='C')
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5])
tm.assert_index_equal(smaller, exp)
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == CDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
def test_dti_custom_getitem_matplotlib_hackaround(self):
rng = pd.bdate_range(START, END, freq='C')
values = rng[:, None]
expected = rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
class TestWhere(object):
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notna(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2))
expected = i2
tm.assert_index_equal(result, expected)
class TestTake(object):
def test_take(self):
# GH#10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
# TODO: This method came from test_datetime; de-dup with version above
@pytest.mark.parametrize('tz', [None, 'US/Eastern', 'Asia/Tokyo'])
def test_take2(self, tz):
dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15),
datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)]
idx = DatetimeIndex(start='2010-01-01 09:00',
end='2010-02-01 09:00', freq='H', tz=tz,
name='idx')
expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz)
taken1 = idx.take([5, 6, 8, 12])
taken2 = idx[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, DatetimeIndex)
assert taken.freq is None
assert taken.tz == expected.tz
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_fill_value_with_timezone(self):
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', tz='US/Eastern')
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', tz='US/Eastern')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', tz='US/Eastern')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', tz='US/Eastern')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
class TestDatetimeIndex(object):
@pytest.mark.parametrize('null', [None, np.nan, pd.NaT])
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern'])
def test_insert_nat(self, tz, null):
# GH#16537, GH#18295 (test missing)
idx = pd.DatetimeIndex(['2017-01-01'], tz=tz)
expected = pd.DatetimeIndex(['NaT', '2017-01-01'], tz=tz)
res = idx.insert(0, null)
tm.assert_index_equal(res, expected)
def test_insert(self):
idx = DatetimeIndex(
['2000-01-04', '2000-01-01', '2000-01-02'], name='idx')
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'], name='idx')
tm.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted',
datetime(2000, 1, 1),
datetime(2000, 1, 2)], name='idx')
assert not isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
idx = date_range('1/1/2000', periods=3, freq='M', name='idx')
# preserve freq
expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29',
'2000-03-31'], name='idx', freq='M')
expected_3 = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',
'2000-04-30'], name='idx', freq='M')
# reset freq to None
expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31',
'2000-02-29',
'2000-03-31'], name='idx',
freq=None)
expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29',
'2000-03-31',
'2000-01-02'], name='idx',
freq=None)
cases = [(0, datetime(1999, 12, 31), expected_0),
(-3, datetime(1999, 12, 31), expected_0),
(3, datetime(2000, 4, 30), expected_3),
(1, datetime(2000, 1, 31), expected_1_nofreq),
(3, datetime(2000, 1, 2), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# reset freq to None
result = idx.insert(3, datetime(2000, 1, 2))
expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',
'2000-01-02'], name='idx', freq=None)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq is None
# see gh-7299
idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo',
name='idx')
with pytest.raises(ValueError):
idx.insert(3, pd.Timestamp('2000-01-04'))
with pytest.raises(ValueError):
idx.insert(3, datetime(2000, 1, 4))
with pytest.raises(ValueError):
idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern'))
with pytest.raises(ValueError):
idx.insert(3, datetime(2000, 1, 4,
tzinfo=pytz.timezone('US/Eastern')))
for tz in ['US/Pacific', 'Asia/Singapore']:
idx = date_range('1/1/2000 09:00', periods=6, freq='H', tz=tz,
name='idx')
# preserve freq
expected = date_range('1/1/2000 09:00', periods=7, freq='H', tz=tz,
name='idx')
for d in [pd.Timestamp('2000-01-01 15:00', tz=tz),
pytz.timezone(tz).localize(datetime(2000, 1, 1, 15))]:
result = idx.insert(6, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00',
'2000-01-01 12:00', '2000-01-01 13:00',
'2000-01-01 14:00',
'2000-01-01 10:00'], name='idx',
tz=tz, freq=None)
# reset freq to None
for d in [pd.Timestamp('2000-01-01 10:00', tz=tz),
pytz.timezone(tz).localize(datetime(2000, 1, 1, 10))]:
result = idx.insert(6, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.tz == expected.tz
assert result.freq is None
def test_delete(self):
idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')
        # preserve freq
expected_0 = date_range(start='2000-02-01', periods=4, freq='M',
name='idx')
expected_4 = date_range(start='2000-01-01', periods=4, freq='M',
name='idx')
# reset freq to None
expected_1 = DatetimeIndex(['2000-01-31', '2000-03-31', '2000-04-30',
'2000-05-31'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
with pytest.raises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
for tz in [None, 'Asia/Tokyo', 'US/Pacific']:
idx = date_range(start='2000-01-01 09:00', periods=10, freq='H',
name='idx', tz=tz)
expected = date_range(start='2000-01-01 10:00', periods=9,
freq='H', name='idx', tz=tz)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freqstr == 'H'
assert result.tz == expected.tz
expected = date_range(start='2000-01-01 09:00', periods=9,
freq='H', name='idx', tz=tz)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freqstr == 'H'
assert result.tz == expected.tz
def test_delete_slice(self):
idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = date_range(start='2000-01-04', periods=7, freq='D',
name='idx')
expected_7_9 = date_range(start='2000-01-01', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03',
'2000-01-07', '2000-01-08', '2000-01-09',
'2000-01-10'], freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
result = idx.delete(slice(n[0], n[-1] + 1))
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
for tz in [None, 'Asia/Tokyo', 'US/Pacific']:
ts = pd.Series(1, index=pd.date_range(
'2000-01-01 09:00', periods=10, freq='H', name='idx', tz=tz))
# preserve freq
result = ts.drop(ts.index[:5]).index
expected = pd.date_range('2000-01-01 14:00', periods=5, freq='H',
name='idx', tz=tz)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
# reset freq to None
result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index
expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 11:00',
'2000-01-01 13:00',
'2000-01-01 15:00', '2000-01-01 17:00'],
freq=None, name='idx', tz=tz)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
assert result.tz == expected.tz
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH#1062
index = DatetimeIndex(['1/3/2000'])
with pytest.raises(KeyError) as excinfo:
index.get_loc('1/1/2000')
assert '2000' in str(excinfo.value)
@pytest.mark.parametrize('key', [pd.Timedelta(0),
pd.Timedelta(1),
timedelta(0)])
def test_timedelta_invalid_key(self, key):
# GH#20464
dti = pd.date_range('1970-01-01', periods=10)
with pytest.raises(TypeError):
dti.get_loc(key)
def test_get_loc_nat(self):
# GH#20464
index = DatetimeIndex(['1/3/2000', 'NaT'])
assert index.get_loc(pd.NaT) == 1
| bsd-3-clause |
rubensmachado/nolearn | nolearn/lasagne/tests/test_base.py | 2 | 19397 | import pickle
from lasagne.layers import ConcatLayer
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import Layer
from lasagne.nonlinearities import identity
from lasagne.nonlinearities import softmax
from lasagne.objectives import categorical_crossentropy
from lasagne.updates import nesterov_momentum
from mock import Mock
from mock import patch
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
import theano
import theano.tensor as T
floatX = theano.config.floatX
class TestLayers:
@pytest.fixture
def layers(self):
from nolearn.lasagne.base import Layers
return Layers([('one', 1), ('two', 2), ('three', 3)])
def test_getitem_with_key(self, layers):
assert layers['one'] == 1
def test_getitem_with_index(self, layers):
assert layers[0] == 1
def test_getitem_with_slice(self, layers):
assert layers[:2] == [1, 2]
def test_keys_returns_list(self, layers):
assert layers.keys() == ['one', 'two', 'three']
def test_values_returns_list(self, layers):
assert layers.values() == [1, 2, 3]
class TestFunctionalMNIST:
def test_accuracy(self, net_fitted, mnist, y_pred):
X, y = mnist
y_test = y[60000:]
assert accuracy_score(y_pred, y_test) > 0.85
def test_train_history(self, net_fitted):
history = net_fitted.train_history_
assert len(history) == 2 # due to early stopping
assert history[1]['valid_accuracy'] > 0.85
assert history[1]['valid_accuracy'] > history[0]['valid_accuracy']
assert set(history[0].keys()) == set([
'dur', 'epoch', 'train_loss', 'train_loss_best',
'valid_loss', 'valid_loss_best', 'valid_accuracy',
])
def test_early_stopping(self, net_fitted):
early_stopping = net_fitted.on_epoch_finished[0]
assert early_stopping.train_history == net_fitted.train_history_
def test_pickle(self, net_fitted, X_test, y_pred):
pickled = pickle.dumps(net_fitted, -1)
net_loaded = pickle.loads(pickled)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_net(self, net, net_fitted, X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_params_values(self, net, net_fitted,
X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted.get_all_params_values())
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_save_params_to_path(self, net_fitted, X_test, y_pred):
path = '/tmp/test_lasagne_functional_mnist.params'
net_fitted.save_params_to(path)
net_loaded = clone(net_fitted)
net_loaded.load_params_from(path)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_message(self, net, net_fitted, capsys):
net2 = clone(net)
net2.verbose = 1
net2.load_params_from(net_fitted)
out = capsys.readouterr()[0]
message = """\
Loaded parameters to layer 'conv1' (shape 8x1x5x5).
Loaded parameters to layer 'conv1' (shape 8).
Loaded parameters to layer 'conv2' (shape 8x8x5x5).
Loaded parameters to layer 'conv2' (shape 8).
Loaded parameters to layer 'hidden1' (shape 128x128).
Loaded parameters to layer 'hidden1' (shape 128).
Loaded parameters to layer 'output' (shape 128x10).
Loaded parameters to layer 'output' (shape 10).
"""
assert out == message
def test_lasagne_functional_grid_search(mnist, monkeypatch):
# Make sure that we can satisfy the grid search interface.
from nolearn.lasagne import NeuralNet
nn = NeuralNet(
layers=[],
)
param_grid = {
'more_params': [{'hidden_num_units': 100}, {'hidden_num_units': 200}],
'update_momentum': [0.9, 0.98],
}
X, y = mnist
vars_hist = []
def fit(self, X, y):
vars_hist.append(vars(self).copy())
return self
with patch.object(NeuralNet, 'fit', autospec=True) as mock_fit:
mock_fit.side_effect = fit
with patch('nolearn.lasagne.NeuralNet.score') as score:
score.return_value = 0.3
gs = GridSearchCV(nn, param_grid, cv=2, refit=False, verbose=4)
gs.fit(X, y)
assert [entry['update_momentum'] for entry in vars_hist] == [
0.9, 0.9, 0.98, 0.98] * 2
assert [entry['more_params'] for entry in vars_hist] == (
[{'hidden_num_units': 100}] * 4 +
[{'hidden_num_units': 200}] * 4
)
def test_clone():
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from nolearn.lasagne import objective
params = dict(
layers=[
('input', InputLayer),
('hidden', DenseLayer),
('output', DenseLayer),
],
input_shape=(100, 784),
output_num_units=10,
output_nonlinearity=softmax,
more_params={
'hidden_num_units': 100,
},
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=False,
objective=objective,
objective_loss_function=categorical_crossentropy,
batch_iterator_train=BatchIterator(batch_size=100),
y_tensor_type=T.ivector,
use_label_encoder=False,
on_epoch_finished=None,
on_training_finished=None,
max_epochs=100,
eval_size=0.1, # BBB
verbose=0,
)
nn = NeuralNet(**params)
nn2 = clone(nn)
params1 = nn.get_params()
params2 = nn2.get_params()
for ignore in (
'batch_iterator_train',
'batch_iterator_test',
'output_nonlinearity',
'loss',
'objective',
'train_split',
'eval_size',
'X_tensor_type',
'on_epoch_finished',
'on_training_started',
'on_training_finished',
'custom_score',
):
for par in (params, params1, params2):
par.pop(ignore, None)
assert params == params1 == params2
def test_lasagne_functional_regression(boston):
from nolearn.lasagne import NeuralNet
X, y = boston
nn = NeuralNet(
layers=[
('input', InputLayer),
('hidden1', DenseLayer),
('output', DenseLayer),
],
input_shape=(128, 13),
hidden1_num_units=100,
output_nonlinearity=identity,
output_num_units=1,
update_learning_rate=0.01,
update_momentum=0.1,
regression=True,
max_epochs=50,
)
nn.fit(X[:300], y[:300])
assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
class TestDefaultObjective:
@pytest.fixture
def get_output(self, monkeypatch):
from nolearn.lasagne import base
get_output_mock = Mock()
monkeypatch.setattr(base, 'get_output', get_output_mock)
return get_output_mock
@pytest.fixture
def objective(self):
from nolearn.lasagne.base import objective
return objective
def test_with_defaults(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
result = objective(
[1, 2, 3], loss_function=loss_function, target=target)
assert result == 2.0
get_output.assert_called_with(3, deterministic=False)
loss_function.assert_called_with(get_output.return_value, target)
def test_with_get_output_kw(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
objective(
[1, 2, 3], loss_function=loss_function, target=target,
get_output_kw={'i_was': 'here'},
)
get_output.assert_called_with(3, deterministic=False, i_was='here')
class TestTrainSplit:
@pytest.fixture
def TrainSplit(self):
from nolearn.lasagne import TrainSplit
return TrainSplit
def test_reproducable(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train1, X_valid1, y_train1, y_valid1 = TrainSplit(0.2)(
X, y, nn)
X_train2, X_valid2, y_train2, y_valid2 = TrainSplit(0.2)(
X, y, nn)
assert np.all(X_train1 == X_train2)
assert np.all(y_valid1 == y_valid2)
def test_eval_size_zero(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.0)(
X, y, nn)
assert len(X_train) == len(X)
assert len(y_train) == len(y)
assert len(X_valid) == 0
assert len(y_valid) == 0
def test_eval_size_half(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.51)(
X, y, nn)
assert len(X_train) + len(X_valid) == 100
assert len(y_train) + len(y_valid) == 100
assert len(X_train) > 45
def test_regression(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.random.random((100))
nn.regression = True
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert len(X_train) == len(y_train) == 80
assert len(X_valid) == len(y_valid) == 20
def test_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert y_train.sum() == 0.8 * 25
assert y_valid.sum() == 0.2 * 25
def test_not_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2, stratify=False)(
X, y, nn)
assert y_train.sum() == 25
assert y_valid.sum() == 0
class TestTrainTestSplitBackwardCompatibility:
@pytest.fixture
def LegacyNet(self, NeuralNet):
class LegacyNet(NeuralNet):
def train_test_split(self, X, y, eval_size):
self.__call_args__ = (X, y, eval_size)
split = int(X.shape[0] * eval_size)
return X[:split], X[split:], y[:split], y[split:]
return LegacyNet
def test_legacy_eval_size(self, NeuralNet):
net = NeuralNet([], eval_size=0.3, max_epochs=0)
assert net.train_split.eval_size == 0.3
def test_legacy_method_default_eval_size(self, LegacyNet):
net = LegacyNet([], max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.2)
def test_legacy_method_given_eval_size(self, LegacyNet):
net = LegacyNet([], eval_size=0.3, max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.3)
class TestCheckForUnusedKwargs:
def test_okay(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
net.initialize()
def test_unused(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
yourlayer_ho='ho',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
with pytest.raises(ValueError) as err:
net.initialize()
assert str(err.value) == 'Unused kwarg: yourlayer_ho'
class TestInitializeLayers:
def test_initialization(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(input, {'shape': (10, 10), 'name': 'input'}),
(hidden1, {'some': 'param', 'another': 'param'}),
(hidden2, {}),
(output, {'name': 'output'}),
],
input_shape=(10, 10),
mock1_some='iwin',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='mock1',
some='iwin', another='param')
assert nn.layers_['mock1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='mock2')
assert nn.layers_['mock2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out is nn.layers_['output']
def test_initialization_legacy(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='hidden1', some='param')
assert nn.layers_['hidden1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='hidden2')
assert nn.layers_['hidden2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out is nn.layers_['output']
def test_diamond(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, concat, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('concat', concat),
('output', output),
],
input_shape=(10, 10),
hidden2_incoming='input',
concat_incomings=['hidden1', 'hidden2'],
)
nn.initialize_layers(nn.layers)
input.assert_called_with(name='input', shape=(10, 10))
hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
concat.assert_called_with(
incomings=[hidden1.return_value, hidden2.return_value],
name='concat'
)
output.assert_called_with(incoming=concat.return_value, name='output')
class TestCheckGoodInput:
@pytest.fixture
def check_good_input(self, nn):
return nn._check_good_input
@pytest.fixture
def X(self):
return np.arange(100).reshape(10, 10).astype(floatX)
@pytest.fixture
def y(self):
return np.arange(10).astype(np.int32)
@pytest.fixture
def y_regr(self):
return np.arange(10).reshape(-1, 1).astype(floatX)
def test_X_OK(self, check_good_input, X):
assert check_good_input(X) == (X, None)
def test_X_and_y_OK(self, check_good_input, X, y):
assert check_good_input(X, y) == (X, y)
def test_X_and_y_OK_regression(self, nn, check_good_input, X, y_regr):
nn.regression = True
assert check_good_input(X, y_regr) == (X, y_regr)
def test_X_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
X[:9],
y
)
def test_X_dict_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
{'one': X, 'two': X},
y[:9],
)
def test_X_dict_length_mismatch(self, check_good_input, X):
with pytest.raises(ValueError):
check_good_input({
'one': X,
'two': X[:9],
})
def test_y_regression_1dim(self, nn, check_good_input, X, y_regr):
y = y_regr.reshape(-1)
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y.reshape(-1, 1)).all()
def test_y_regression_2dim(self, nn, check_good_input, X, y_regr):
y = y_regr
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y).all()
class TestMultiInputFunctional:
@pytest.fixture(scope='session')
def net(self, NeuralNet):
return NeuralNet(
layers=[
(InputLayer,
{'name': 'input1', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden1', 'num_units': 98}),
(InputLayer,
{'name': 'input2', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden2', 'num_units': 98}),
(ConcatLayer,
{'incomings': ['hidden1', 'hidden2']}),
(DenseLayer,
{'name': 'hidden3', 'num_units': 98}),
(DenseLayer,
{'name': 'output', 'num_units': 10, 'nonlinearity': softmax}),
],
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=2,
verbose=4,
)
@pytest.fixture(scope='session')
def net_fitted(self, net, mnist):
X, y = mnist
X_train, y_train = X[:10000], y[:10000]
X_train1, X_train2 = X_train[:, :392], X_train[:, 392:]
return net.fit({'input1': X_train1, 'input2': X_train2}, y_train)
@pytest.fixture(scope='session')
def y_pred(self, net_fitted, mnist):
X, y = mnist
X_test = X[60000:]
X_test1, X_test2 = X_test[:, :392], X_test[:, 392:]
return net_fitted.predict({'input1': X_test1, 'input2': X_test2})
def test_accuracy(self, net_fitted, mnist, y_pred):
X, y = mnist
y_test = y[60000:]
assert accuracy_score(y_pred, y_test) > 0.85
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/io/msgpack/test_seq.py | 14 | 1171 | # coding: utf-8
import io
import pandas.io.msgpack as msgpack
binarydata = bytes(bytearray(range(256)))
def gen_binary_data(idx):
return binarydata[:idx % 300]
def test_exceeding_unpacker_read_size():
dumpf = io.BytesIO()
packer = msgpack.Packer()
NUMBER_OF_STRINGS = 6
read_size = 16
# 5 ok for read_size=16, while 6 glibc detected *** python: double free or
# corruption (fasttop):
# 20 ok for read_size=256, while 25 segfaults / glibc detected *** python:
# double free or corruption (!prev)
# 40 ok for read_size=1024, while 50 introduces errors
# 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected ***
# python: double free or corruption (!prev):
for idx in range(NUMBER_OF_STRINGS):
data = gen_binary_data(idx)
dumpf.write(packer.pack(data))
f = io.BytesIO(dumpf.getvalue())
dumpf.close()
unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
read_count = 0
for idx, o in enumerate(unpacker):
assert type(o) == bytes
assert o == gen_binary_data(idx)
read_count += 1
assert read_count == NUMBER_OF_STRINGS
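# Hedged companion sketch (added for illustration; not part of the original
# pandas test file): the same round-trip can be exercised without a file
# object by feeding packed bytes into an Unpacker incrementally -- each
# complete msgpack frame then comes back as one item of the iterator.
def test_unpacker_feed_roundtrip():
    packer = msgpack.Packer()
    unpacker = msgpack.Unpacker(use_list=1)
    n_strings = 6
    for idx in range(n_strings):
        unpacker.feed(packer.pack(gen_binary_data(idx)))
    unpacked = list(unpacker)
    assert len(unpacked) == n_strings
    for idx, obj in enumerate(unpacked):
        assert obj == gen_binary_data(idx)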
| apache-2.0 |
victorbergelin/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
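# Hedged sketch (added for illustration; not part of the original benchmark):
# when the data cannot be loaded at once, MiniBatchKMeans can also be driven
# chunk by chunk through partial_fit, updating the centroids with each
# mini-batch instead of a single call to fit.
def compute_incremental_example(n_chunks=10, chunk_size=1000, n_features=20):
    rng = nr.RandomState(42)
    mbkmeans = MiniBatchKMeans(init='k-means++', n_clusters=10,
                               batch_size=chunk_size)
    for _ in range(n_chunks):
        chunk = rng.random_sample((chunk_size, n_features))
        mbkmeans.partial_fit(chunk)
    return mbkmeans.cluster_centers_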
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
jbonaiuto/perceptual-choice-hysteresis | src/python/perceptchoice/model/utils.py | 1 | 3691 | from brian import second, ms, hertz
from matplotlib.patches import Rectangle
from matplotlib.pyplot import figure, subplot, ylim, legend, ylabel, title, xlabel
import numpy as np
def get_response_time(e_firing_rates, stim_start_time, stim_end_time, upper_threshold=60, threshold_diff=None, dt=.1*ms):
rate_1=e_firing_rates[0]
rate_2=e_firing_rates[1]
times=np.array(range(len(rate_1)))*(dt/second)
rt=None
decision_idx=-1
for idx,time in enumerate(times):
time=time*second
if stim_start_time < time < stim_end_time:
if rt is None:
if rate_1[idx]>=upper_threshold and (threshold_diff is None or rate_1[idx]-rate_2[idx]>=threshold_diff):
decision_idx=0
rt=(time-stim_start_time)/ms
break
elif rate_2[idx]>=upper_threshold and (threshold_diff is None or rate_2[idx]-rate_1[idx]>=threshold_diff):
decision_idx=1
rt=(time-stim_start_time)/ms
break
return rt,decision_idx
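# Hedged usage sketch (added for illustration; not part of the original
# module): two toy firing-rate traces sampled at the default dt of 0.1 ms.
# The first trace ramps faster, so it should win the threshold race and
# yield choice 0 with a response time measured from stimulus onset in ms.
def example_response_time():
    n_steps = 20000  # 20000 * 0.1 ms = 2 s of simulated time
    rates = np.vstack([np.linspace(0.0, 100.0, n_steps),
                       np.linspace(0.0, 40.0, n_steps)])
    rt, choice = get_response_time(rates, 0.5 * second, 1.5 * second,
                                   upper_threshold=60)
    return rt, choice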
def plot_network_firing_rates(e_rates, sim_params, network_params, std_e_rates=None, i_rate=None, std_i_rate=None,
plt_title=None, labels=None, ax=None):
rt, choice = get_response_time(e_rates, sim_params.stim_start_time, sim_params.stim_end_time,
upper_threshold = network_params.resp_threshold, dt = sim_params.dt)
if ax is None:
figure()
max_rates=[network_params.resp_threshold]
if i_rate is not None:
max_rates.append(np.max(i_rate[500:]))
for i in range(network_params.num_groups):
max_rates.append(np.max(e_rates[i,500:]))
max_rate=np.max(max_rates)
if i_rate is not None:
ax=subplot(211)
elif ax is None:
ax=subplot(111)
rect=Rectangle((0,0),(sim_params.stim_end_time-sim_params.stim_start_time)/ms, max_rate+5,
alpha=0.25, facecolor='yellow', edgecolor='none')
ax.add_patch(rect)
for idx in range(network_params.num_groups):
label='e %d' % idx
if labels is not None:
label=labels[idx]
time_ticks=(np.array(range(e_rates.shape[1]))*sim_params.dt)/ms-sim_params.stim_start_time/ms
baseline,=ax.plot(time_ticks, e_rates[idx,:], label=label)
if std_e_rates is not None:
ax.fill_between(time_ticks, e_rates[idx,:]-std_e_rates[idx,:], e_rates[idx,:]+std_e_rates[idx,:], alpha=0.5,
facecolor=baseline.get_color())
ylim(0,max_rate+5)
ax.plot([0-sim_params.stim_start_time/ms, (sim_params.trial_duration-sim_params.stim_start_time)/ms],
[network_params.resp_threshold/hertz, network_params.resp_threshold/hertz], 'k--')
ax.plot([rt,rt],[0, max_rate+5],'k--')
legend(loc='best')
ylabel('Firing rate (Hz)')
if plt_title is not None:
title(plt_title)
if i_rate is not None:
ax=subplot(212)
rect=Rectangle((0,0),(sim_params.stim_end_time-sim_params.stim_start_time)/ms, max_rate+5,
alpha=0.25, facecolor='yellow', edgecolor='none')
ax.add_patch(rect)
label='i'
if labels is not None:
label=labels[network_params.num_groups]
time_ticks=(np.array(range(len(i_rate)))*sim_params.dt)/ms-sim_params.stim_start_time/ms
baseline,=ax.plot(time_ticks, i_rate, label=label)
if std_i_rate is not None:
ax.fill_between(time_ticks, i_rate-std_i_rate, i_rate+std_i_rate, alpha=0.5, facecolor=baseline.get_color())
ylim(0,max_rate+5)
ax.plot([rt,rt],[0, max_rate],'k--')
ylabel('Firing rate (Hz)')
xlabel('Time (ms)')
| gpl-3.0 |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and unit conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather, a units implementation must provide a ConversionInterface, and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain old unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
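# Hedged illustration (added for exposition; not part of matplotlib itself):
# a minimal converter for a made-up Temperature class, written in the same
# style as the DateConverter example in the module docstring. Registering it
# would let registry.get_converter resolve single instances and sequences.
class _ExampleTemperature:
    def __init__(self, kelvin):
        self.kelvin = kelvin
class _ExampleTemperatureConverter(ConversionInterface):
    def convert(obj, unit):
        'convert a Temperature (or a sequence of them) to plain floats'
        if iterable(obj):
            return [t.kelvin for t in obj]
        return obj.kelvin
    convert = staticmethod(convert)
    def axisinfo(unit):
        'label the axis with the temperature unit'
        return AxisInfo(label='temperature (K)')
    axisinfo = staticmethod(axisinfo)
    def default_units(x):
        'all Temperature instances default to kelvin'
        return 'K'
    default_units = staticmethod(default_units)
# Example registration, left commented out so importing this module is unchanged:
# registry[_ExampleTemperature] = _ExampleTemperatureConverter()
# registry.get_converter([_ExampleTemperature(273.15)])  # -> converter instance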
| agpl-3.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/examples/ex_kernel_regression2.py | 3 | 1470 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
y_true = np.sin(x*5)/x + 2*x
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
model = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls',
defaults=nparam.EstimatorSettings(efficient=True))
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
model1 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls')
mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='ll',
var_type='c', bw='cv_ls')
mean2, mfx2 = model2.fit()
print model.bw
print model1.bw
print model2.bw
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(x, sm_mean, lw=2, label='kernel mean')
ax.plot(x, mean2, lw=2, label='kernel mean')
ax.legend()
plt.show()
| apache-2.0 |
IshankGulati/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
devanshdalal/scikit-learn | sklearn/ensemble/forest.py | 8 | 67993 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
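# Hedged illustration (added here; not in upstream scikit-learn): for a fixed
# random_state the bootstrap (in-bag) indices and the unsampled (out-of-bag)
# indices together cover every sample index, which is what makes the OOB
# score an estimate on data each tree has never seen.
def _example_oob_partition(random_state=0, n_samples=10):
    in_bag = _generate_sample_indices(random_state, n_samples)
    out_of_bag = _generate_unsampled_indices(random_state, n_samples)
    assert set(in_bag) | set(out_of_bag) == set(range(n_samples))
    return in_bag, out_of_bag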
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity,
# which [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
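# Hedged usage sketch (illustration only; not part of scikit-learn): fit a
# small forest on made-up data and read back the out-of-bag estimate of the
# generalization accuracy together with the per-feature importances.
def _example_random_forest_classifier():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
    clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                 random_state=0)
    clf.fit(X, y)
    return clf.oob_score_, clf.feature_importances_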
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
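# Hedged usage sketch (illustration only; not part of scikit-learn): regress a
# noisy linear target and inspect the out-of-bag R^2 estimate, which is only
# available because bootstrap=True is the default here.
def _example_random_forest_regressor():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = 2.0 * X[:, 0] - X[:, 1] + 0.05 * rng.randn(200)
    reg = RandomForestRegressor(n_estimators=25, oob_score=True,
                                random_state=0)
    reg.fit(X, y)
    return reg.oob_score_, reg.predict(X[:5])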
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
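Examples
--------
A minimal usage sketch; the synthetic data below is illustrative only:
>>> from sklearn.datasets import make_classification
>>> from sklearn.ensemble import ExtraTreesClassifier
>>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
>>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
>>> clf = clf.fit(X, y)
>>> y_pred = clf.predict(X[:2])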
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
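Examples
--------
A minimal usage sketch; the synthetic data below is illustrative only:
>>> from sklearn.datasets import make_regression
>>> from sklearn.ensemble import ExtraTreesRegressor
>>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
>>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
>>> reg = reg.fit(X, y)
>>> y_pred = reg.predict(X[:2])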
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
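Examples
--------
A minimal sketch of the unsupervised transform; the random toy data is illustrative only:
>>> import numpy as np
>>> from sklearn.ensemble import RandomTreesEmbedding
>>> X = np.random.RandomState(0).rand(20, 2)
>>> embedder = RandomTreesEmbedding(n_estimators=5, random_state=0)
>>> X_sparse = embedder.fit_transform(X)  # sparse one-hot encoding of the leaves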
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
zorojean/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/resample/test_base.py | 2 | 7259 | from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.groupby.groupby import DataError
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.resample import _asfreq_compat
# a fixture value can be overridden by the test parameter value. Note that the
# value of the fixture can be overridden this way even if the test doesn't use
# it directly (doesn't mention it in the function prototype).
# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization # noqa
# in this module we override the fixture values defined in conftest.py
# tuples of '_index_factory,_series_name,_index_start,_index_end'
DATE_RANGE = (date_range, "dti", datetime(2005, 1, 1), datetime(2005, 1, 10))
PERIOD_RANGE = (period_range, "pi", datetime(2005, 1, 1), datetime(2005, 1, 10))
TIMEDELTA_RANGE = (timedelta_range, "tdi", "1 day", "10 day")
all_ts = pytest.mark.parametrize(
"_index_factory,_series_name,_index_start,_index_end",
[DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE],
)
@pytest.fixture
def create_index(_index_factory):
def _create_index(*args, **kwargs):
""" return the _index_factory created using the args, kwargs """
return _index_factory(*args, **kwargs)
return _create_index
@pytest.mark.parametrize("freq", ["2D", "1H"])
@pytest.mark.parametrize(
"_index_factory,_series_name,_index_start,_index_end", [DATE_RANGE, TIMEDELTA_RANGE]
)
def test_asfreq(series_and_frame, freq, create_index):
obj = series_and_frame
result = obj.resample(freq).asfreq()
new_index = create_index(obj.index[0], obj.index[-1], freq=freq)
expected = obj.reindex(new_index)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"_index_factory,_series_name,_index_start,_index_end", [DATE_RANGE, TIMEDELTA_RANGE]
)
def test_asfreq_fill_value(series, create_index):
# test for fill value during resampling, issue 3715
s = series
result = s.resample("1H").asfreq()
new_index = create_index(s.index[0], s.index[-1], freq="1H")
expected = s.reindex(new_index)
tm.assert_series_equal(result, expected)
frame = s.to_frame("value")
frame.iloc[1] = None
result = frame.resample("1H").asfreq(fill_value=4.0)
new_index = create_index(frame.index[0], frame.index[-1], freq="1H")
expected = frame.reindex(new_index, fill_value=4.0)
tm.assert_frame_equal(result, expected)
@all_ts
def test_resample_interpolate(frame):
# # 12925
df = frame
tm.assert_frame_equal(
df.resample("1T").asfreq().interpolate(), df.resample("1T").interpolate()
)
def test_raises_on_non_datetimelike_index():
# this is a non datetimelike index
xp = DataFrame()
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, "
"but got an instance of 'Index'"
)
with pytest.raises(TypeError, match=msg):
xp.resample("A").mean()
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
def test_resample_empty_series(freq, empty_series_dti, resample_method):
# GH12771 & GH12868
if resample_method == "ohlc":
pytest.skip("need to test for ohlc from GH13083")
s = empty_series_dti
result = getattr(s.resample(freq), resample_method)()
expected = s.copy()
expected.index = _asfreq_compat(s.index, freq)
tm.assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
tm.assert_series_equal(result, expected, check_dtype=False)
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
@pytest.mark.parametrize("resample_method", ["count", "size"])
def test_resample_count_empty_series(freq, empty_series_dti, resample_method):
# GH28427
result = getattr(empty_series_dti.resample(freq), resample_method)()
index = _asfreq_compat(empty_series_dti.index, freq)
expected = Series([], dtype="int64", index=index, name=empty_series_dti.name)
tm.assert_series_equal(result, expected)
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
def test_resample_empty_dataframe(empty_frame_dti, freq, resample_method):
# GH13212
df = empty_frame_dti
# count retains dimensions too
result = getattr(df.resample(freq), resample_method)()
if resample_method != "size":
expected = df.copy()
else:
# GH14962
expected = Series([], dtype=object)
expected.index = _asfreq_compat(df.index, freq)
tm.assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
tm.assert_almost_equal(result, expected, check_dtype=False)
# test size for GH13212 (currently stays as df)
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
def test_resample_count_empty_dataframe(freq, empty_frame_dti):
# GH28427
empty_frame_dti["a"] = []
result = empty_frame_dti.resample(freq).count()
index = _asfreq_compat(empty_frame_dti.index, freq)
expected = DataFrame({"a": []}, dtype="int64", index=index)
tm.assert_frame_equal(result, expected)
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
def test_resample_size_empty_dataframe(freq, empty_frame_dti):
# GH28427
empty_frame_dti["a"] = []
result = empty_frame_dti.resample(freq).size()
index = _asfreq_compat(empty_frame_dti.index, freq)
expected = Series([], dtype="int64", index=index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", tm.all_timeseries_index_generator(0))
@pytest.mark.parametrize("dtype", [float, int, object, "datetime64[ns]"])
def test_resample_empty_dtypes(index, dtype, resample_method):
# Empty series were sometimes causing a segfault (for the functions
# with Cython bounds-checking disabled) or an IndexError. We just run
# them to ensure they no longer do. (GH #10228)
empty_series_dti = Series([], index, dtype)
try:
getattr(empty_series_dti.resample("d"), resample_method)()
except DataError:
# Ignore these since some combinations are invalid
# (ex: doing mean with dtype of np.object)
pass
@all_ts
def test_apply_to_empty_series(empty_series_dti):
# GH 14313
s = empty_series_dti
for freq in ["M", "D", "H"]:
result = s.resample(freq).apply(lambda x: 1)
expected = s.resample(freq).apply(np.sum)
tm.assert_series_equal(result, expected, check_dtype=False)
@all_ts
def test_resampler_is_iterable(series):
# GH 15314
freq = "H"
tg = Grouper(freq=freq, convention="start")
grouped = series.groupby(tg)
resampled = series.resample(freq)
for (rk, rv), (gk, gv) in zip(resampled, grouped):
assert rk == gk
tm.assert_series_equal(rv, gv)
@all_ts
def test_resample_quantile(series):
# GH 15023
s = series
q = 0.75
freq = "H"
result = s.resample(freq).quantile(q)
expected = s.resample(freq).agg(lambda x: x.quantile(q)).rename(s.name)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
JeffRoy/mi-dataset | utils/parse_file.py | 3 | 6615 | #!/usr/bin/env python
import importlib
import json
import os
from functools import wraps
import click as click
import datetime
import pandas as pd
import xarray as xr
import numpy as np
from mi.core.log import get_logger, LoggerManager
try:
import cPickle as pickle
except ImportError:
import pickle
lm = LoggerManager()
log = get_logger()
base_path = os.path.dirname(os.path.dirname(__file__))
class StopWatch(object):
"""
Easily measure elapsed time
"""
def __init__(self, message=None):
self.start_time = datetime.datetime.now()
self.message = message
def __repr__(self):
stop = datetime.datetime.now()
r = str(stop - self.start_time)
if self.message:
return self.message + ' ' + r
return r
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
log.info(self)
def monkey_patch_particles():
"""
Monkey patch DataParticle.generate to skip the JSON-encoding
:return:
"""
log.info('Monkey patching DataParticle.generate')
import mi.core.instrument.data_particle
def _generate(self, sorted=False):
return self.generate_dict()
mi.core.instrument.data_particle.DataParticle.generate = _generate
def log_timing(func):
"""
Decorator which will log the time elapsed while executing a function call
:param func: function to be wrapped
:return: wrapped function
"""
@wraps(func)
def inner(*args, **kwargs):
with StopWatch('Function %s took:' % func):
return func(*args, **kwargs)
return inner
class ParticleHandler(object):
"""
Particle handler which flattens all data particle "values" lists to key: value pairs in the parent dictionary
Also contains a method to output the particle data as a dictionary of pandas dataframes
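For example, a hypothetical particle dictionary such as
``{'pkt_version': 1, 'values': [{'value_id': 'temp', 'value': 10.1}]}``
is flattened to ``{'pkt_version': 1, 'temp': 10.1}``.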
"""
def __init__(self, output_path=None, formatter=None):
self.samples = {}
self.failure = False
if output_path is None:
output_path = os.getcwd()
self.output_path = output_path
self.formatter = formatter
self.check_output_path()
def check_output_path(self):
op = self.output_path
if os.path.isdir(op):
return
if os.path.isfile(op):
raise OSError('output path is a file!')
else:
os.makedirs(op)
@staticmethod
def flatten(sample):
values = sample.pop('values')
for each in values:
sample[each['value_id']] = each['value']
return sample
def addParticleSample(self, sample_type, sample):
sample = self.flatten(sample)
self.samples.setdefault(sample_type, []).append(sample)
def setParticleDataCaptureFailure(self):
self.failure = True
@log_timing
def to_dataframes(self):
data_frames = {}
for particle_type in self.samples:
data_frames[particle_type] = self.fix_arrays(pd.DataFrame(self.samples[particle_type]))
return data_frames
def to_datasets(self):
datasets = {}
for particle_type in self.samples:
datasets[particle_type] = self.fix_arrays(pd.DataFrame(self.samples[particle_type]), return_as_xr=True)
return datasets
@staticmethod
@log_timing
def fix_arrays(data_frame, return_as_xr=False):
# round-trip the dataframe through xray to get the multidimensional indexing correct
new_ds = xr.Dataset()
for each in data_frame:
if data_frame[each].dtype == 'object' and isinstance(data_frame[each].values[0], list):
data = np.array([np.array(x) for x in data_frame[each].values])
new_ds[each] = xr.DataArray(data)
else:
new_ds[each] = data_frame[each]
if return_as_xr:
return new_ds
return new_ds.to_dataframe()
@log_timing
def to_csv(self):
dataframes = self.to_dataframes()
for particle_type in dataframes:
file_path = os.path.join(self.output_path, '%s.csv' % particle_type)
dataframes[particle_type].to_csv(file_path)
@log_timing
def to_json(self):
for particle_type in self.samples:
file_path = os.path.join(self.output_path, '%s.json' % particle_type)
with open(file_path, 'w') as fh:
json.dump(self.samples[particle_type], fh)
@log_timing
def to_pd_pickle(self):
dataframes = self.to_dataframes()
for particle_type in dataframes:
file_path = os.path.join(self.output_path, '%s.pd' % particle_type)
with open(file_path, 'wb') as fh:
pickle.dump(dataframes[particle_type], fh, protocol=-1)
@log_timing
def to_xr_pickle(self):
datasets = self.to_datasets()
for particle_type in datasets:
file_path = os.path.join(self.output_path, '%s.xr' % particle_type)
with open(file_path, 'wb') as fh:
pickle.dump(datasets[particle_type], fh, protocol=-1)
def write(self):
option_map = {
'csv': self.to_csv,
'json': self.to_json,
'pd-pickle': self.to_pd_pickle,
'xr-pickle': self.to_xr_pickle
}
formatter = option_map[self.formatter]
formatter()
def find_driver(driver_string):
try:
return importlib.import_module(driver_string)
except ImportError:
if os.sep in driver_string:
driver_string = driver_string.replace('.py', '')
driver_string = driver_string.replace(os.sep, '.')
return importlib.import_module(driver_string)
raise Exception('Unable to locate driver: %r' % driver_string)
def run(driver, files, fmt, out):
monkey_patch_particles()
log.info('Importing driver: %s', driver)
module = find_driver(driver)
particle_handler = ParticleHandler(output_path=out, formatter=fmt)
for file_path in files:
log.info('Begin parsing: %s', file_path)
with StopWatch('Parsing file: %s took' % file_path):
module.parse(base_path, file_path, particle_handler)
particle_handler.write()
@click.command()
@click.option('--fmt', type=click.Choice(['csv', 'json', 'pd-pickle', 'xr-pickle']), default='csv')
@click.option('--out', type=click.Path(exists=False), default=None)
@click.argument('driver', nargs=1)
@click.argument('files', nargs=-1, type=click.Path(exists=True))
def main(driver, files, fmt, out):
run(driver, files, fmt, out)
if __name__ == '__main__':
main()
| bsd-2-clause |
jgomezdans/sampyl | examples/slice_sample.py | 2 | 1708 | import sys
sys.path.append('.')
import sampyl as smp
from sampyl.state import State
from sampyl import np
from sampyl.diagnostics import diagnostics
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
# correlated gaussian log likelihood
def logp(x, y):
icov = np.linalg.inv(np.array([[1., .8], [.8, 1.]]))
d = np.array([x, y])
return -.5 * np.dot(np.dot(d, icov), d)
logp_xy = lambda th: logp(th[0], th[1])
# compare slice samplers, metropolis hastings, and the two variable
# slice sampler
ssamp = smp.Slice(logp, start={'x': 4., 'y': 4.} )
slice_trace = ssamp.sample(1000)
met = smp.Metropolis(logp, start={'x': 4., 'y': 4.})
met_trace = met.sample(1000)
bslice = smp.Slice(logp_xy, start={'th': np.array([4., 4.])})
btrace = bslice.sample(1000)
# compute effective sample size based on autocorrelation
slice_eff = diagnostics.compute_n_eff_acf(slice_trace.x)
met_eff = diagnostics.compute_n_eff_acf(met_trace.x)
b_eff = diagnostics.compute_n_eff_acf(btrace.th[:,0])
print "Slice effective sample size: %2.2f"%slice_eff
print "MH effective sample size: %2.2f"%met_eff
print "two var slice effective sample size: %2.2f"%b_eff
print " ----- "
print "Slice sampler evals per sample: ", ssamp.evals_per_sample
# graphically compare samples
fig, axarr = plt.subplots(1, 3, figsize=(12,4))
axarr[0].scatter(slice_trace.x, slice_trace.y)
axarr[0].set_title("Slice samples")
axarr[1].scatter(met_trace.x, met_trace.y)
axarr[1].set_title("MH samples")
axarr[2].scatter(btrace.th[:,0], btrace.th[:,1])
axarr[2].set_title("Two var Slice samples")
for ax in axarr:
ax.set_xlim((-4, 4))
ax.set_ylim((-4, 4))
plt.show()
| apache-2.0 |
tboch/mocpy | mocpy/moc/moc.py | 1 | 35073 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
from urllib.parse import urlencode
from io import BytesIO
from astropy.utils.data import download_file
from astropy import units as u
from astropy.io import fits
from astropy.coordinates import ICRS, Galactic, BaseCoordinateFrame
from astropy.coordinates import SkyCoord, Angle
from astropy import wcs
import cdshealpix
try:
from astropy_healpix import HEALPix
except ImportError:
pass
from ..abstract_moc import AbstractMOC
from ..interval_set import IntervalSet
from .. import core
from .boundaries import Boundaries
from .plot import fill, border
__author__ = "Thomas Boch, Matthieu Baumann"
__copyright__ = "CDS, Centre de Données astronomiques de Strasbourg"
__license__ = "BSD 3-Clause License"
__email__ = "[email protected], [email protected]"
class MOC(AbstractMOC):
"""
Multi-order spatial coverage class.
A MOC describes the coverage of an arbitrary region on the unit sphere.
MOCs are usually used for describing the global coverage of catalog/image surveys such as GALEX or SDSS.
A MOC corresponds to a list of `HEALPix <https://healpix.sourceforge.io/>`__ cells at different depths.
This class gives you the possibility to:
1. Define `~mocpy.moc.MOC` objects:
- From a FITS file that stores HEALPix cells (see `from_fits`).
- Directly from a list of HEALPix cells expressed either as a numpy structured array (see `from_healpix_cells`) or a simple
python dictionary (see `from_json`).
- From a list of sky coordinates (see `from_skycoords`, `from_lonlat`).
- From a convex/concave polygon (see `from_polygon`).
- From a cone (will be implemented in a next version).
2. Perform fast logical operations between `~mocpy.moc.MOC` objects:
- The `intersection`
- The `union`
- The `difference`
- The `complement`
3. Plot the `~mocpy.moc.MOC` objects:
- Draw the MOC with its HEALPix cells (see `fill`)
- Draw the perimeter of a MOC (see `border`)
4. Get the sky coordinates defining the border(s) of `~mocpy.moc.MOC` objects (see `get_boundaries`).
5. Serialize `~mocpy.moc.MOC` objects to `astropy.io.fits.HDUList` or JSON dictionary and save it to a file.
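A short sketch of a typical workflow (the cone positions and radii below are arbitrary):
>>> import astropy.units as u
>>> from astropy.coordinates import Angle
>>> from mocpy import MOC
>>> moc_a = MOC.from_cone(lon=0 * u.deg, lat=0 * u.deg, radius=Angle(5, u.deg), max_depth=9)
>>> moc_b = MOC.from_cone(lon=3 * u.deg, lat=0 * u.deg, radius=Angle(5, u.deg), max_depth=9)
>>> moc_union = moc_a.union(moc_b)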
"""
def __init__(self, interval_set=None):
super(MOC, self).__init__(interval_set)
def contains(self, ra, dec, keep_inside=True):
"""
Returns a boolean mask array of the positions lying inside (or outside) the MOC instance.
Parameters
----------
ra : `astropy.units.Quantity`
Right ascension array
dec : `astropy.units.Quantity`
Declination array
keep_inside : bool, optional
True by default. If so the mask describes coordinates lying inside the MOC. If ``keep_inside``
is false, contains will return the mask of the coordinates lying outside the MOC.
Returns
-------
array : `~np.ndarray`
A mask boolean array
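Examples
--------
A small sketch; the cone and the test positions are arbitrary:
>>> import astropy.units as u
>>> from astropy.coordinates import Angle
>>> from mocpy import MOC
>>> moc = MOC.from_cone(lon=0 * u.deg, lat=0 * u.deg, radius=Angle(10, u.deg), max_depth=9)
>>> mask = moc.contains(ra=[0., 45.] * u.deg, dec=[0., 45.] * u.deg)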
"""
max_depth = self.max_order
m = np.zeros(3 << (2*(max_depth + 1)), dtype=bool)
pix_id = core.flatten_pixels(self._interval_set._intervals, max_depth)
m[pix_id] = True
if not keep_inside:
m = np.logical_not(m)
pix = cdshealpix.lonlat_to_healpix(ra, dec, max_depth)
return m[pix]
def add_neighbours(self):
"""
Extends the MOC instance so that it includes the HEALPix cells touching its border.
The depth of the HEALPix cells added at the border is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self extended by one degree of neighbours.
"""
max_depth = self.max_order
# Get the pixels array of the MOC at the its max order.
ipix = core.flatten_pixels(self._interval_set._intervals, max_depth)
# Get the HEALPix array containing the neighbors of ``ipix``.
# This array "extends" ``ipix`` by one degree of neighbors.
ipix_extended = cdshealpix.neighbours(ipix, max_depth)
ipix_extended = ipix_extended[ipix_extended >= 0]
ipix_extended = ipix_extended.astype(np.uint64)
# Compute the difference between ``extend_ipix`` and ``ipix`` to get only the neighboring pixels
# located at the border of the MOC.
ipix_neighbors = np.setdiff1d(ipix_extended, ipix)
depth_neighbors = np.full(shape=ipix_neighbors.shape, fill_value=max_depth, dtype=np.int8)
#intervals_neighbors = core.from_healpix_cells(ipix_neighbors, depth_neighbors)
moc_neighbors = MOC.from_healpix_cells(ipix_neighbors, depth_neighbors)
# This array of HEALPix neighbors are added to the MOC to get an ``extended`` MOC
#self._interval_set._intervals = core.coverage_union(self._interval_set._intervals, moc_neighbors._interval_set._intervals)
final = self.union(moc_neighbors)
self._interval_set = final._interval_set
return self
def remove_neighbours(self):
"""
Removes from the MOC instance the HEALPix cells located at its border.
The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self minus its HEALPix cells located at its border.
"""
max_depth = self.max_order
# Get the HEALPix cells of the MOC at its max depth
ipix = core.flatten_pixels(self._interval_set._intervals, max_depth)
# Get the HEALPix array containing the neighbors of ``ipix``.
# This array "extends" ``ipix`` by one degree of neighbors.
ipix_extended = cdshealpix.neighbours(ipix, max_depth)
ipix_extended = ipix_extended[ipix_extended >= 0]
ipix_extended = ipix_extended.astype(np.uint64)
# Get only the max depth HEALPix cells lying at the border of the MOC
ipix_neighbors = np.setxor1d(ipix_extended, ipix)
# Remove these pixels from ``ipix``
ipix_around_border = cdshealpix.neighbours(ipix_neighbors, max_depth)
ipix_around_border = ipix_around_border[ipix_around_border >= 0]
ipix_around_border = ipix_around_border.astype(np.uint64)
final_ipix = np.setdiff1d(ipix, ipix_around_border)
final_depth = np.full(shape=final_ipix.shape, fill_value=max_depth, dtype=np.int8)
# Build the reduced MOC, i.e. MOC without its pixels which were located at its border.
final = MOC.from_healpix_cells(final_ipix, final_depth)
self._interval_set = final._interval_set
return self
def fill(self, ax, wcs, **kw_mpl_pathpatch):
"""
Draws the MOC on a matplotlib axis.
This performs the projection of the cells from the world coordinate system to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`.
Examples
--------
>>> from mocpy import MOC, World2ScreenMPL
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with World2ScreenMPL(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call fill giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... # We will set the matplotlib keyword linewidth to 0 so that it does not plot
... # the border of each HEALPix cell.
... # The color can also be specified along with an alpha value.
... moc.fill(ax=ax, wcs=wcs, linewidth=0, alpha=0.5, fill=True, color="green")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
fill.fill(self, ax, wcs, **kw_mpl_pathpatch)
def border(self, ax, wcs, **kw_mpl_pathpatch):
"""
Draws the MOC border(s) on a matplotlib axis.
This performs the projection of the sky coordinates defining the perimeter of the MOC to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`
Examples
--------
>>> from mocpy import MOC, World2ScreenMPL
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with World2ScreenMPL(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call border giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... moc.border(ax=ax, wcs=wcs, alpha=0.5, color="red")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
border.border(self, ax, wcs, **kw_mpl_pathpatch)
def get_boundaries(self, order=None):
"""
Returns the sky coordinates defining the border(s) of the MOC.
The border(s) are expressed as a list of SkyCoord.
Each SkyCoord refers to the coordinates of one border of the MOC (i.e.
either a border of a connected MOC part or a border of a hole
located in a connected MOC part).
This function is currently not stable: encoding a vertex of a
HEALPix cell (N, E, S, W) should not depend on the position of the
vertex but rather on the uniq value (+ 2 bits to encode the direction
of the vertex).
Parameters
----------
order : int
The depth of the MOC before computing its boundaries.
A shallow depth leads to a faster computation.
By default the maximum depth of the MOC is taken.
Raises
------
DeprecationWarning
This method is not stable and not tested! A future more stable algorithm will be implemented!
Return
------
coords: [`~astropy.coordinates.SkyCoord`]
A list of `~astropy.coordinates.SkyCoord` each describing one border.
"""
import warnings
warnings.simplefilter('default')
warnings.warn('This method is not stable. A future more stable algorithm will be implemented!', DeprecationWarning)
return Boundaries.get(self, order)
@classmethod
def from_fits_image(cls, hdu, max_norder, mask=None):
"""
Creates a `~mocpy.moc.MOC` from an image stored as a FITS file.
Parameters
----------
hdu : HDU object
HDU containing the data of the image
max_norder : int
The moc resolution.
mask : `numpy.ndarray`, optional
A boolean array of the same size as the image where pixels having the value 1 are part of
the final MOC and pixels having the value 0 are not.
Returns
-------
moc : `~mocpy.moc.MOC`
The resulting MOC.
"""
# Only take the first HDU
header = hdu.header
height = header['NAXIS2']
width = header['NAXIS1']
# Compute a WCS from the header of the image
w = wcs.WCS(header)
if mask is None:
data = hdu.data
# A mask is computed discarding nan floating values
mask = np.isfinite(data)
# If the BLANK keyword is set to a value then we mask those
# pixels too
if header.get('BLANK') is not None:
discard_val = header['BLANK']
# We keep the finite values and those who are not equal to the BLANK field
mask = mask & (data != discard_val)
y, x = np.where(mask)
pix = np.dstack((x, y))[0]
world = w.wcs_pix2world(pix, 0)
# Remove coord containing inf/nan values
good = np.isfinite(world)
# It is a good coordinates whether both its coordinate are good
good = good[:, 0] & good[:, 1]
world = world[good]
# Get the frame from the wcs
frame = wcs.utils.wcs_to_celestial_frame(w)
skycrd = SkyCoord(
world,
unit="deg",
frame=frame
)
# Compute the order based on the CDELT
c1 = header['CDELT1']
c2 = header['CDELT2']
max_res_px = np.sqrt(c1*c1 + c2*c2) * np.pi / 180.0
max_depth_px = int(np.floor(np.log2(np.pi / (3 * max_res_px * max_res_px)) / 2))
max_norder = min(max_norder, max_depth_px)
moc = MOC.from_lonlat(
lon=skycrd.icrs.ra,
lat=skycrd.icrs.dec,
max_norder=max_norder
)
return moc
@classmethod
def from_fits_images(cls, path_l, max_norder):
"""
Loads a MOC from a set of FITS file images.
Assumes the data of the image is stored in the first HDU of the FITS file.
Please call `~mocpy.moc.MOC.from_fits_image` for passing another hdu than the first one.
Parameters
----------
path_l : [str]
A list of path where the fits image are located.
max_norder : int
The MOC resolution.
Returns
-------
moc : `~mocpy.moc.MOC`
The union of all the MOCs created from the paths found in ``path_l``.
"""
moc = MOC()
for filename in path_l:
with fits.open(filename) as hdul:
current_moc = MOC.from_fits_image(hdu=hdul[0], max_norder=max_norder)
moc = moc.union(current_moc)
return moc
@classmethod
def from_vizier_table(cls, table_id, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object
from a vizier catalog ID.
Parameters
----------
table_id : str
table index
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
nside_possible_values = (8, 16, 32, 64, 128, 256, 512)
if nside not in nside_possible_values:
raise ValueError('Bad value for nside. Must be in {0}'.format(nside_possible_values))
result = cls.from_ivorn('ivo://CDS/' + table_id, nside)
return result
MOC_SERVER_ROOT_URL = 'http://alasky.unistra.fr/MocServer/query'
@classmethod
def from_ivorn(cls, ivorn, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a given ivorn.
Parameters
----------
ivorn : str
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
return cls.from_url('%s?%s' % (MOC.MOC_SERVER_ROOT_URL,
urlencode({
'ivorn': ivorn,
'get': 'moc',
'order': int(np.log2(nside))
})))
@classmethod
def from_url(cls, url):
"""
Creates a `~mocpy.moc.MOC` object from a given url.
Parameters
----------
url : str
The url of a FITS file storing a MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
path = download_file(url, show_progress=False, timeout=60)
return cls.from_fits(path)
@classmethod
def from_skycoords(cls, skycoords, max_norder):
"""
Creates a MOC from an `astropy.coordinates.SkyCoord`.
Parameters
----------
skycoords : `astropy.coordinates.SkyCoord`
The sky coordinates that will belong to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
return cls.from_lonlat(lon=skycoords.icrs.ra, lat=skycoords.icrs.dec, max_norder=max_norder)
@classmethod
def from_lonlat(cls, lon, lat, max_norder):
"""
Creates a MOC from astropy lon, lat `astropy.units.Quantity`.
Parameters
----------
lon : `astropy.units.Quantity`
The longitudes of the sky coordinates belonging to the MOC.
lat : `astropy.units.Quantity`
The latitudes of the sky coordinates belonging to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
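Examples
--------
A small sketch with arbitrary coordinates:
>>> import astropy.units as u
>>> from mocpy import MOC
>>> lon = [0., 1., 2.] * u.deg
>>> lat = [10., 11., 12.] * u.deg
>>> moc = MOC.from_lonlat(lon=lon, lat=lat, max_norder=7)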
"""
intervals = core.from_lonlat(max_norder, lon.to_value(u.rad).astype(np.float64), lat.to_value(u.rad).astype(np.float64))
return cls(IntervalSet(intervals, make_consistent=False))
@classmethod
def from_valued_healpix_cells(cls, uniq, values, max_depth=None, cumul_from=0.0, cumul_to=1.0):
"""
Creates a MOC from a list of uniq associated with values.
HEALPix cells are first sorted by their values.
The MOC contains the cells for which the cumulative value lies between
``cumul_from`` and ``cumul_to``.
Cells that straddle these cumulative bounds are recursively split and added
until the depth of the cells is equal to ``max_depth``.
Parameters
----------
uniq : `numpy.ndarray`
HEALPix cell indices written in uniq. dtype must be np.uint64
values : `numpy.ndarray`
Probabilities associated with each ``uniq`` cells. dtype must be np.float64
max_depth : int, optional
The max depth of the MOC. If a depth is given, degrade the MOC to this depth before returning it to the user.
Otherwise choose as ``max_depth`` the depth corresponding to the smallest HEALPix cell found in ``uniq``.
cumul_from : float
Cumulative value from which cells will be added to the MOC
cumul_to : float
Cumulative value to which cells will be added to the MOC
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
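Examples
--------
A small illustrative sketch; the uniq indices and the weights below are made up:
>>> import numpy as np
>>> from mocpy import MOC
>>> uniq = np.array([64, 65, 66, 67], dtype=np.uint64)  # four depth-2 cells in NUNIQ notation
>>> values = np.array([0.5, 0.3, 0.15, 0.05], dtype=np.float64)
>>> moc = MOC.from_valued_healpix_cells(uniq, values, cumul_from=0.0, cumul_to=0.8)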
"""
max_depth_tile = 0
if uniq.size > 0:
# Get the depth of the smallest uniq
# Bigger uniq corresponds to big depth HEALPix cells.
max_depth_tile = int(np.log2(uniq.max() >> 2)) >> 1
assert max_depth_tile >= 0 and max_depth_tile <= 29, "Invalid uniq numbers. Too big uniq or negative uniq numbers might be the cause."
# Create the MOC at the max_depth equals to the smallest cell
# found in the uniq array
intervals = core.from_valued_hpx_cells(
np.uint8(max_depth_tile),
uniq.astype(np.uint64),
values.astype(np.float64),
np.float64(cumul_from),
np.float64(cumul_to)
)
moc = cls(IntervalSet(intervals, make_consistent=False))
# Degrade the MOC to the depth requested by the user
if max_depth is not None:
assert max_depth >= 0 and max_depth <= 29, "Max depth must be in [0, 29]"
moc = moc.degrade_to_order(max_depth)
return moc
@classmethod
def from_elliptical_cone(cls, lon, lat, a, b, pa, max_depth, delta_depth=2):
"""
Creates a MOC from an elliptical cone
The ellipse is centered around the (`lon`, `lat`) position. `a` (resp. `b`) corresponds
to the semi-major axis magnitude (resp. semi-minor axis magnitude). `pa` is expressed as a
`~astropy.coordinates.Angle` and defines the position angle of the elliptical cone.
Parameters
----------
lon : `astropy.units.Quantity`
The longitude of the center of the elliptical cone.
lat : `astropy.units.Quantity`
The latitude of the center of the elliptical cone.
a : `astropy.coordinates.Angle`
The semi-major axis angle of the elliptical cone.
b : `astropy.coordinates.Angle`
The semi-minor axis angle of the elliptical cone.
pa : `astropy.coordinates.Angle`
The position angle (i.e. the angle between the north and the semi-major axis, east-of-north).
max_depth : int
Maximum HEALPix cell resolution.
delta_depth : int, optional
To control the approximation, you can choose to perform the computations at a deeper
depth using the `depth_delta` parameter.
The depth at which the computations will be made will therefore be equal to
`depth` + `depth_delta`.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
Examples
--------
>>> from mocpy import MOC
>>> import astropy.units as u
>>> from astropy.coordinates import Angle
>>> moc = MOC.from_elliptical_cone(
... lon=0 * u.deg,
... lat=0 * u.deg,
... a=Angle(10, u.deg),
... b=Angle(5, u.deg),
... pa=Angle(0, u.deg),
... max_depth=10
... )
"""
pix, depth, fully_covered_flags = cdshealpix.elliptical_cone_search(lon, lat, a, b, pa, max_depth, delta_depth, flat=False)
return MOC.from_healpix_cells(pix, depth, fully_covered_flags)
@classmethod
def from_cone(cls, lon, lat, radius, max_depth, delta_depth=2):
"""
Creates a MOC from a cone.
The cone is centered around the (`lon`, `lat`) position with a radius expressed by
`radius`.
Parameters
----------
lon : `astropy.units.Quantity`
The longitude of the center of the cone.
lat : `astropy.units.Quantity`
The latitude of the center of the cone.
radius : `astropy.coordinates.Angle`
The radius angle of the cone.
max_depth : int
Maximum HEALPix cell resolution.
delta_depth : int, optional
To control the approximation, you can choose to perform the computations at a deeper
depth using the `depth_delta` parameter.
The depth at which the computations will be made will therefore be equal to
`max_depth` + `depth_delta`.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
Examples
--------
>>> from mocpy import MOC
>>> import astropy.units as u
>>> from astropy.coordinates import Angle
>>> moc = MOC.from_cone(
... lon=0 * u.deg,
... lat=0 * u.deg,
... radius=Angle(10, u.deg),
... max_depth=10
... )
"""
pix, depth, fully_covered_flags = cdshealpix.cone_search(lon, lat, radius, max_depth, delta_depth, flat=False)
return MOC.from_healpix_cells(pix, depth, fully_covered_flags)
@classmethod
def from_polygon_skycoord(cls, skycoord, max_depth=10):
"""
Creates a MOC from a polygon.
The polygon is given as an `astropy.coordinates.SkyCoord` that contains the
vertices of the polygon. Concave, convex and self-intersecting polygons are accepted.
Parameters
----------
skycoord : `astropy.coordinates.SkyCoord`
The sky coordinates defining the vertices of a polygon. It can describe a convex or
concave polygon but not a self-intersecting one.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
return MOC.from_polygon(lon=skycoord.icrs.ra, lat=skycoord.icrs.dec, max_depth=max_depth)
@classmethod
def from_polygon(cls, lon, lat, max_depth=10):
"""
Creates a MOC from a polygon
The polygon is given as lon and lat `astropy.units.Quantity` that define the
vertices of the polygon. Concave, convex and self-intersecting polygons are accepted.
Parameters
----------
lon : `astropy.units.Quantity`
The longitudes defining the polygon. Can describe convex and
concave polygons but not self-intersecting ones.
lat : `astropy.units.Quantity`
The latitudes defining the polygon. Can describe convex and concave
polygons but not self-intersecting ones.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
pix, depth, fully_covered_flags = cdshealpix.polygon_search(lon, lat, max_depth)
return MOC.from_healpix_cells(pix, depth, fully_covered_flags)
@classmethod
def from_healpix_cells(cls, ipix, depth, fully_covered=None):
"""
Creates a MOC from a set of HEALPix cells at a given depth.
Parameters
----------
ipix : `numpy.ndarray`
HEALPix cell indices in the NESTED notation. dtype must be np.uint64
depth : `numpy.ndarray`
Depth of the HEALPix cells. Must be of the same size as `ipix`.
dtype must be np.uint8. Corresponds to the `level` of an HEALPix cell in astropy.healpix.
fully_covered : `numpy.ndarray`, optional
HEALPix cells coverage flags. This flag informs whether a cell is
fully covered by a cone (resp. polygon, elliptical cone) or not.
Must be of the same size as `ipix`.
Raises
------
IndexError
When `ipix`, `depth` and `fully_covered` do not have the same shape
Returns
-------
moc : `~mocpy.moc.MOC`
The MOC
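Examples
--------
A small sketch with arbitrary depth-3 cells:
>>> import numpy as np
>>> from mocpy import MOC
>>> ipix = np.array([42, 43, 44], dtype=np.uint64)
>>> depth = np.array([3, 3, 3], dtype=np.uint8)
>>> moc = MOC.from_healpix_cells(ipix, depth)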
"""
if ipix.shape != depth.shape:
raise IndexError("pixels and depth arrays must have the same shape")
if fully_covered is not None and fully_covered.shape != ipix.shape:
raise IndexError("fully covered and depth arrays must have the same shape")
intervals = core.from_healpix_cells(ipix.astype(np.uint64), depth.astype(np.int8))
return cls(IntervalSet(intervals, make_consistent=False))
@staticmethod
def order_to_spatial_resolution(order):
"""
Convert a depth to its equivalent spatial resolution.
Parameters
----------
order : int
Spatial depth.
Returns
-------
spatial_resolution : `~astropy.coordinates.Angle`
Spatial resolution.
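Examples
--------
A quick sanity check of the scaling; the returned angle shrinks by a factor of two per order:
>>> from mocpy import MOC
>>> res_order_10 = MOC.order_to_spatial_resolution(10)
>>> res_arcmin = res_order_10.to('arcmin')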
"""
spatial_resolution = Angle(np.sqrt(np.pi/(3 * 4**(order))), unit='rad')
return spatial_resolution
@staticmethod
def spatial_resolution_to_order(spatial_resolution):
"""
Convert a spatial resolution to a MOC order.
Parameters
----------
spatial_resolution : `~astropy.coordinates.Angle`
Spatial resolution
Returns
-------
order : int
The order corresponding to the spatial resolution
"""
res_rad = spatial_resolution.rad
order = np.ceil(np.log2(np.pi/(3*res_rad*res_rad))/2)
return np.uint8(order)
@property
def _fits_header_keywords(self):
return {
'PIXTYPE': 'HEALPIX',
'ORDERING': 'NUNIQ',
'COORDSYS': ('C', 'reference frame (C=ICRS)'),
'MOCORDER': self.max_order,
'MOCTOOL': 'MOCPy'
}
@property
def _fits_format(self):
depth = self.max_order
if depth <= 13:
fits_format = '1J'
else:
fits_format = '1K'
return fits_format
@property
def sky_fraction(self):
"""
Sky fraction covered by the MOC
"""
sky_fraction = core.coverage_sky_fraction(self._interval_set._intervals)
return sky_fraction
# TODO : move this in astroquery.Simbad.query_region
# See https://github.com/astropy/astroquery/pull/1466
def query_simbad(self, max_rows=10000):
"""
Query a view of SIMBAD data for SIMBAD objects in the coverage of the MOC instance.
"""
return self._query('SIMBAD', max_rows)
# TODO : move this in astroquery.Vizier.query_region
# See https://github.com/astropy/astroquery/pull/1466
def query_vizier_table(self, table_id, max_rows=10000):
"""
Query a VizieR table for sources in the coverage of the MOC instance.
"""
return self._query(table_id, max_rows)
# TODO : move this in astroquery
def _query(moc, resource_id, max_rows=100000):
"""
Internal method to query Simbad or a VizieR table
for sources in the coverage of the MOC instance
"""
from astropy.io.votable import parse_single_table
import requests
moc_file = BytesIO()
moc_fits = moc.serialize(format='fits')
moc_fits.writeto(moc_file)
r = requests.post('http://cdsxmatch.u-strasbg.fr/QueryCat/QueryCat',
data={'mode': 'mocfile',
'catName': resource_id,
'format': 'votable',
'limit': max_rows},
files={'moc': moc_file.getvalue()},
headers={'User-Agent': 'MOCPy'},
stream=True)
votable = BytesIO()
votable.write(r.content)
table = parse_single_table(votable).to_table()
return table
def plot(self, title='MOC', frame=None):
"""
Plot the MOC object using a mollweide projection.
**Deprecated**: New `fill` and `border` methods produce more reliable results and allow you to specify additional
matplotlib style parameters.
Parameters
----------
title : str
The title of the plot
frame : `astropy.coordinates.BaseCoordinateFrame`, optional
Describes the coordinate system the plot will be (ICRS, Galactic are the only coordinate systems supported).
"""
import warnings
warnings.simplefilter('default')
warnings.warn('This method is deprecated and is no longer tested. '
'Please refer to this documentation page for plotting MOCs using '
'matplotlib: https://mocpy.readthedocs.io/en/latest/examples/examples.html#loading-and-plotting-the-moc-of-sdss', DeprecationWarning)
frame = ICRS() if frame is None else frame
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
plot_order = 8
if self.max_order > plot_order:
plotted_moc = self.degrade_to_order(plot_order)
else:
plotted_moc = self
num_pixels_map = 1024
delta = 2. * np.pi / num_pixels_map
x = np.arange(-np.pi, np.pi, delta)
y = np.arange(-np.pi/2, np.pi/2, delta)
lon_rad, lat_rad = np.meshgrid(x, y)
hp = HEALPix(nside=(1 << plotted_moc.max_order), order='nested')
if frame and not isinstance(frame, BaseCoordinateFrame):
raise ValueError("Only Galactic/ICRS coordinate systems are supported. "
"Please set `coord` to either 'C' or 'G'.")
pix_map = hp.lonlat_to_healpix(lon_rad * u.rad, lat_rad * u.rad)
m = np.zeros(12*4**(plotted_moc.max_order))
pix_id = core.flatten_pixels(plotted_moc._interval_set._intervals, plotted_moc.max_order)
# change the HEALPix cells if the frame of the MOC is not the same as the one associated with the plot method.
if isinstance(frame, Galactic):
lon, lat = hp.boundaries_lonlat(pix_id, step=2)
sky_crd = SkyCoord(lon, lat, unit='deg')
pix_id = hp.lonlat_to_healpix(sky_crd.galactic.l, sky_crd.galactic.b)
m[pix_id] = 1
z = np.flip(m[pix_map], axis=1)
plt.figure(figsize=(10, 10))
ax = plt.subplot(111, projection="mollweide")
ax.set_xticklabels(['150°', '120°', '90°', '60°', '30°', '0°', '330°', '300°', '270°', '240°', '210°', '180°'])
color_map = LinearSegmentedColormap.from_list('w2r', ['#eeeeee', '#aa0000'])
color_map.set_under('w')
color_map.set_bad('gray')
ax.pcolormesh(x, y, z, cmap=color_map, vmin=0, vmax=1)
ax.tick_params(labelsize=14, labelcolor='#000000')
plt.title(title)
plt.grid(True, linestyle='--', linewidth=1, color='#555555')
plt.show()
| gpl-3.0 |