repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
espenhgn/nest-simulator | pynest/nest/tests/test_get_set.py | 5 | 21303 | # -*- coding: utf-8 -*-
#
# test_get_set.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NodeCollection get/set tests
"""
import unittest
import nest
import json
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
try:
import pandas
import pandas.util.testing as pt
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@nest.ll_api.check_stack
class TestNodeCollectionGetSet(unittest.TestCase):
"""NodeCollection get/set tests"""
def setUp(self):
nest.ResetKernel()
def test_get(self):
"""
Test that get function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
C_m = nodes.get('C_m')
node_ids = nodes.get('global_id')
E_L = nodes.get('E_L')
V_m = nodes.get('V_m')
t_ref = nodes.get('t_ref')
g = nodes.get(['local', 'thread', 'vp'])
local = g['local']
thread = g['thread']
vp = g['vp']
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(node_ids, tuple(range(1, 11)))
self.assertEqual(E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertTrue(local)
self.assertEqual(thread, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(vp, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
g_reference = {'local': (True, True, True, True, True,
True, True, True, True, True),
'thread': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'vp': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)}
self.assertEqual(g, g_reference)
def test_get_sliced(self):
"""
Test that get works on sliced NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
V_m = nodes[2:5].get('V_m')
g = nodes[5:7].get(['t_ref', 'tau_m'])
C_m = nodes[2:9:2].get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -70.0))
self.assertEqual(g['t_ref'], (2.0, 2.0))
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0))
def test_get_composite(self):
"""
Test that get function works on composite NodeCollections
"""
n1 = nest.Create('iaf_psc_alpha', 2)
n2 = nest.Create('iaf_psc_delta', 2)
n3 = nest.Create('iaf_psc_exp')
n4 = nest.Create('iaf_psc_alpha', 3)
n1.set(V_m=[-77., -88.])
n3.set({'V_m': -55.})
n1.set(C_m=[251., 252.])
n2.set(C_m=[253., 254.])
n3.set({'C_m': 255.})
n4.set(C_m=[256., 257., 258.])
n5 = n1 + n2 + n3 + n4
status_dict = n5.get()
# Check that we get values in correct order
vm_ref = (-77., -88., -70., -70., -55, -70., -70., -70.)
self.assertEqual(status_dict['V_m'], vm_ref)
# Check that we get None where not applicable
# tau_syn_ex is part of iaf_psc_alpha
tau_ref = (2., 2., None, None, 2., 2., 2., 2.)
self.assertEqual(status_dict['tau_syn_ex'], tau_ref)
# refractory_input is part of iaf_psc_delta
refrac_ref = (None, None,
False, False,
None, None,
None, None)
self.assertEqual(status_dict['refractory_input'], refrac_ref)
# Check that calling get with string works on composite NCs, both on
# parameters all the models have, and on individual parameters.
Cm_ref = [x * 1. for x in range(251, 259)]
Cm = n5.get('C_m')
self.assertEqual(list(Cm), Cm_ref)
refrac = n5.get('refractory_input')
self.assertEqual(refrac, refrac_ref)
@unittest.skipIf(not HAVE_NUMPY, 'NumPy package is not available')
def test_get_different_size(self):
"""
Test get with different input for different sizes of NodeCollections
"""
single_sd = nest.Create('spike_detector', 1)
multi_sd = nest.Create('spike_detector', 10)
empty_array_float = np.array([], dtype=np.float64)
empty_array_int = np.array([], dtype=np.int64)
# Single node, literal parameter
self.assertEqual(single_sd.get('start'), 0.0)
# Single node, array parameter
self.assertEqual(single_sd.get(['start', 'time_in_steps']),
{'start': 0.0, 'time_in_steps': False})
# Single node, hierarchical with literal parameter
np.testing.assert_array_equal(single_sd.get('events', 'times'),
empty_array_float)
# Multiple nodes, hierarchical with literal parameter
values = multi_sd.get('events', 'times')
for v in values:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, hierarchical with array parameter
values = single_sd.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
np.testing.assert_array_equal(values['senders'], empty_array_int)
np.testing.assert_array_equal(values['times'], empty_array_float)
# Multiple nodes, hierarchical with array parameter
values = multi_sd.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
self.assertEqual(len(values['senders']), len(multi_sd))
for v in values['senders']:
np.testing.assert_array_equal(v, empty_array_int)
for v in values['times']:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, no parameter (gets all values)
values = single_sd.get()
num_values_single_sd = len(values.keys())
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sd.get()
self.assertEqual(len(values.keys()), num_values_single_sd)
self.assertEqual(values['start'],
tuple(0.0 for i in range(len(multi_sd))))
@unittest.skipIf(not HAVE_PANDAS, 'Pandas package is not available')
def test_get_pandas(self):
"""
Test that get function with Pandas output works as expected.
"""
single_sd = nest.Create('spike_detector', 1)
multi_sd = nest.Create('spike_detector', 10)
empty_array_float = np.array([], dtype=np.float64)
# Single node, literal parameter
pt.assert_frame_equal(single_sd.get('start', output='pandas'),
pandas.DataFrame({'start': [0.0]},
index=tuple(single_sd.tolist())))
# Multiple nodes, literal parameter
pt.assert_frame_equal(multi_sd.get('start', output='pandas'),
pandas.DataFrame(
{'start': [0.0 for i in range(
len(multi_sd))]},
index=tuple(multi_sd.tolist())))
# Single node, array parameter
pt.assert_frame_equal(single_sd.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame({'start': [0.0],
'n_events': [0]},
index=tuple(single_sd.tolist())))
# Multiple nodes, array parameter
ref_dict = {'start': [0.0 for i in range(len(multi_sd))],
'n_events': [0]}
pt.assert_frame_equal(multi_sd.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sd.tolist())))
# Single node, hierarchical with literal parameter
pt.assert_frame_equal(single_sd.get('events', 'times',
output='pandas'),
pandas.DataFrame({'times': [[]]},
index=tuple(single_sd.tolist())))
# Multiple nodes, hierarchical with literal parameter
ref_dict = {'times': [empty_array_float
for i in range(len(multi_sd))]}
pt.assert_frame_equal(multi_sd.get('events', 'times',
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sd.tolist())))
# Single node, hierarchical with array parameter
ref_df = pandas.DataFrame(
{'times': [[]], 'senders': [[]]}, index=tuple(single_sd.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sd.get(
'events', ['senders', 'times'], output='pandas'),
ref_df)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': [[] for i in range(len(multi_sd))],
'senders': [[] for i in range(len(multi_sd))]}
ref_df = pandas.DataFrame(
ref_dict,
index=tuple(multi_sd.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
sd_df = multi_sd.get('events', ['senders', 'times'], output='pandas')
sd_df = sd_df.reindex(sorted(sd_df.columns), axis=1)
pt.assert_frame_equal(sd_df,
ref_df)
# Single node, no parameter (gets all values)
values = single_sd.get(output='pandas')
num_values_single_sd = values.shape[1]
self.assertEqual(values['start'][tuple(single_sd.tolist())[0]], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sd.get(output='pandas')
self.assertEqual(values.shape, (len(multi_sd), num_values_single_sd))
pt.assert_series_equal(values['start'],
pandas.Series({key: 0.0
for key in tuple(multi_sd.tolist())},
dtype=np.float64,
name='start'))
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sd)
nest.Connect(nodes, multi_sd, 'one_to_one')
nest.Simulate(39)
ref_dict = {'times': [[31.8, 36.1, 38.5]],
'senders': [[17, 12, 20]]}
ref_df = pandas.DataFrame(ref_dict, index=tuple(single_sd.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sd.get('events', ['senders', 'times'],
output='pandas'),
ref_df)
ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
[]],
'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
ref_df = pandas.DataFrame(ref_dict, index=tuple(multi_sd.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(multi_sd.get('events', ['senders', 'times'],
output='pandas'),
ref_df)
def test_get_JSON(self):
"""
Test that get function with json output works as expected.
"""
single_sd = nest.Create('spike_detector', 1)
multi_sd = nest.Create('spike_detector', 10)
# Single node, literal parameter
self.assertEqual(json.loads(
single_sd.get('start', output='json')), 0.0)
# Multiple nodes, literal parameter
self.assertEqual(
json.loads(multi_sd.get('start', output='json')),
len(multi_sd) * [0.0])
# Single node, array parameter
ref_dict = {'start': 0.0, 'n_events': 0}
self.assertEqual(
json.loads(single_sd.get(['start', 'n_events'], output='json')),
ref_dict)
# Multiple nodes, array parameter
ref_dict = {'start': len(multi_sd) * [0.0],
'n_events': len(multi_sd) * [0]}
self.assertEqual(
json.loads(multi_sd.get(['start', 'n_events'], output='json')),
ref_dict)
# Single node, hierarchical with literal parameter
self.assertEqual(json.loads(single_sd.get(
'events', 'times', output='json')), [])
# Multiple nodes, hierarchical with literal parameter
ref_list = len(multi_sd) * [[]]
self.assertEqual(
json.loads(multi_sd.get('events', 'times', output='json')),
ref_list)
# Single node, hierarchical with array parameter
ref_dict = {'senders': [], 'times': []}
self.assertEqual(
json.loads(single_sd.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': len(multi_sd) * [[]],
'senders': len(multi_sd) * [[]]}
self.assertEqual(
json.loads(multi_sd.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Single node, no parameter (gets all values)
values = json.loads(single_sd.get(output='json'))
num_values_single_sd = len(values)
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = json.loads(multi_sd.get(output='json'))
self.assertEqual(len(values), num_values_single_sd)
self.assertEqual(values['start'], len(multi_sd) * [0.0])
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sd)
nest.Connect(nodes, multi_sd, 'one_to_one')
nest.Simulate(39)
ref_dict = {'times': [31.8, 36.1, 38.5],
'senders': [17, 12, 20]}
self.assertEqual(
json.loads(single_sd.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
[]],
'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
self.assertEqual(
json.loads(multi_sd.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
def test_set(self):
"""
Test that set function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
# Dict to set same value for all nodes.
nodes.set({'C_m': 100.0})
C_m = nodes.get('C_m')
self.assertEqual(C_m, (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
# Set same value for all nodes.
nodes.set(tau_Ca=500.0)
tau_Ca = nodes.get('tau_Ca')
self.assertEqual(tau_Ca, (500.0, 500.0, 500.0, 500.0, 500.0,
500.0, 500.0, 500.0, 500.0, 500.0))
# List of dicts, where each dict corresponds to a single node.
nodes.set(({'V_m': 10.0}, {'V_m': 20.0}, {'V_m': 30.0}, {'V_m': 40.0},
{'V_m': 50.0}, {'V_m': 60.0}, {'V_m': 70.0}, {'V_m': 80.0},
{'V_m': 90.0}, {'V_m': -100.0}))
V_m = nodes.get('V_m')
self.assertEqual(V_m, (10.0, 20.0, 30.0, 40.0, 50.0,
60.0, 70.0, 80.0, 90.0, -100.0))
# Set value of a parameter based on list. List must be length of nodes.
nodes.set(V_reset=[-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.])
V_reset = nodes.get('V_reset')
self.assertEqual(V_reset, (-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.))
with self.assertRaises(IndexError):
nodes.set(V_reset=[-85., -82., -80., -77., -75.])
# Set different parameters with a dictionary.
nodes.set({'t_ref': 44.0, 'tau_m': 2.0, 'tau_minus': 42.0})
g = nodes.get(['t_ref', 'tau_m', 'tau_minus'])
self.assertEqual(g['t_ref'], (44.0, 44.0, 44.0, 44.0, 44.0,
44.0, 44.0, 44.0, 44.0, 44.0))
self.assertEqual(g['tau_m'], (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertEqual(g['tau_minus'], (42.0, 42.0, 42.0, 42.0, 42.0,
42.0, 42.0, 42.0, 42.0, 42.0))
with self.assertRaises(nest.kernel.NESTError):
nodes.set({'vp': 2})
def test_set_composite(self):
"""
Test that set works on composite NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes[2:5].set(({'V_m': -50.0}, {'V_m': -40.0}, {'V_m': -30.0}))
nodes[5:7].set({'t_ref': 4.4, 'tau_m': 3.0})
nodes[2:9:2].set(C_m=111.0)
V_m = nodes.get('V_m')
g = nodes.get(['t_ref', 'tau_m'])
C_m = nodes.get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -50.0, -40.0, -30.0,
-70.0, -70.0, -70.0, -70.0, -70.0,))
self.assertEqual(g, {'t_ref': (2.0, 2.0, 2.0, 2.0, 2.0,
4.4, 4.4, 2.0, 2.0, 2.0),
'tau_m': (10.0, 10.0, 10.0, 10.0, 10.0,
3.00, 3.00, 10.0, 10.0, 10.0)})
self.assertEqual(C_m, (250.0, 250.0, 111.0, 250.0, 111.0,
250.0, 111.0, 250.0, 111.0, 250.0))
def test_get_attribute(self):
"""Test get using getattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
self.assertEqual(nodes.C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(nodes.global_id, tuple(range(1, 11)))
self.assertEqual(nodes.E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
with self.assertRaises(KeyError):
print(nodes.nonexistent_attribute)
self.assertIsNone(nodes.spatial)
spatial_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([2, 2]))
self.assertIsNotNone(spatial_nodes.spatial)
spatial_reference = {'network_size': 4,
'center': (0.0, 0.0),
'edge_wrap': False,
'extent': (1.0, 1.0),
'shape': (2, 2)}
self.assertEqual(spatial_nodes.spatial, spatial_reference)
def test_set_attribute(self):
"""Test set using setattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes.C_m = 100.0
self.assertEqual(nodes.get('C_m'), (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
v_reset_reference = (-85., -82., -80., -77., -75., -72., -70., -67., -65., -62.)
nodes.V_reset = v_reset_reference
self.assertEqual(nodes.get('V_reset'), v_reset_reference)
with self.assertRaises(IndexError):
nodes.V_reset = [-85., -82., -80., -77., -75.]
with self.assertRaises(nest.kernel.NESTError):
nodes.nonexistent_attribute = 1.
def suite():
suite = unittest.makeSuite(TestNodeCollectionGetSet, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
pvlib/pvlib-python | pvlib/tests/test_numerical_precision.py | 2 | 4331 | """
Test numerical precision of explicit single diode calculation using symbolic
mathematics. SymPy is a computer algebra system that uses infinite precision
symbols instead of standard floating point and integer computer number types.
http://docs.sympy.org/latest/modules/evalf.html#accuracy-and-error-handling
This module can be executed from the command line to generate a high precision
dataset of I-V curve points to test the explicit single diode calculations
:func:`pvlib.singlediode.bishop88`::
$ python test_numerical_precision.py
This generates a file in the pvlib data folder, which is specified by the
constant ``DATA_PATH``. When the test is run using ``pytest`` it will compare
the values calculated by :func:`pvlib.singlediode.bishop88` with the
high-precision values generated with SymPy.
"""
import logging
import numpy as np
import pandas as pd
from pvlib import pvsystem
from pvlib.singlediode import bishop88, estimate_voc
from .conftest import DATA_DIR
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
TEST_DATA = 'bishop88_numerical_precision.csv'
DATA_PATH = DATA_DIR / TEST_DATA
POA = 888
TCELL = 55
# module parameters from CEC module SunPower SPR-E20-327
SPR_E20_327 = {
'alpha_sc': 0.004522,
'a_ref': 2.6868,
'I_L_ref': 6.468,
'I_o_ref': 1.88e-10,
'R_s': 0.37,
'R_sh_ref': 298.13,
}
# apply temp/irrad desoto corrections
ARGS = pvsystem.calcparams_desoto(
effective_irradiance=POA, temp_cell=TCELL,
EgRef=1.121, dEgdT=-0.0002677, **SPR_E20_327,
)
IL, I0, RS, RSH, NNSVTH = ARGS
IVCURVE_NPTS = 100
try:
from sympy import symbols, exp as sy_exp
except ImportError as exc:
LOGGER.exception(exc)
symbols = NotImplemented
sy_exp = NotImplemented
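# For reference, the symbolic expressions built in generate_numerical_precision
# below encode the single-diode model in terms of the diode voltage vd:
#   i = IL - I0*(exp(vd/(n*Ns*Vth)) - 1) - vd/Rsh
#   v = vd - i*Rs
# The gradients (di/dvd, dv/dvd, dp/dv, d2p/dv2) follow by differentiating
# these two relations.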
def generate_numerical_precision(): # pragma: no cover
"""
Generate expected data with infinite numerical precision using SymPy.
:return: dataframe of expected values
"""
if symbols is NotImplemented:
LOGGER.critical("SymPy is required to generate expected data.")
raise ImportError("could not import sympy")
il, io, rs, rsh, nnsvt, vd = symbols('il, io, rs, rsh, nnsvt, vd')
a = sy_exp(vd / nnsvt)
b = 1.0 / rsh
i = il - io * (a - 1.0) - vd * b
v = vd - i * rs
c = io * a / nnsvt
grad_i = - c - b # di/dvd
grad_v = 1.0 - grad_i * rs # dv/dvd
# dp/dv = d(iv)/dv = v * di/dv + i
grad = grad_i / grad_v # di/dv
p = i * v
grad_p = v * grad + i # dp/dv
grad2i = -c / nnsvt
grad2v = -grad2i * rs
grad2p = (
grad_v * grad + v * (grad2i/grad_v - grad_i*grad2v/grad_v**2) + grad_i
)
# generate exact values
data = dict(zip((il, io, rs, rsh, nnsvt), ARGS))
vdtest = np.linspace(0, estimate_voc(IL, I0, NNSVTH), IVCURVE_NPTS)
expected = []
for test in vdtest:
data[vd] = test
test_data = {
'i': np.float64(i.evalf(subs=data)),
'v': np.float64(v.evalf(subs=data)),
'p': np.float64(p.evalf(subs=data)),
'grad_i': np.float64(grad_i.evalf(subs=data)),
'grad_v': np.float64(grad_v.evalf(subs=data)),
'grad': np.float64(grad.evalf(subs=data)),
'grad_p': np.float64(grad_p.evalf(subs=data)),
'grad2p': np.float64(grad2p.evalf(subs=data))
}
LOGGER.debug(test_data)
expected.append(test_data)
return pd.DataFrame(expected, index=vdtest)
def test_numerical_precision():
"""
Test that there are no numerical errors due to floating point arithmetic.
"""
expected = pd.read_csv(DATA_PATH)
vdtest = np.linspace(0, estimate_voc(IL, I0, NNSVTH), IVCURVE_NPTS)
results = bishop88(vdtest, *ARGS, gradients=True)
assert np.allclose(expected['i'], results[0])
assert np.allclose(expected['v'], results[1])
assert np.allclose(expected['p'], results[2])
assert np.allclose(expected['grad_i'], results[3])
assert np.allclose(expected['grad_v'], results[4])
assert np.allclose(expected['grad'], results[5])
assert np.allclose(expected['grad_p'], results[6])
assert np.allclose(expected['grad2p'], results[7])
if __name__ == '__main__': # pragma: no cover
expected = generate_numerical_precision()
expected.to_csv(DATA_PATH)
test_numerical_precision()
| bsd-3-clause |
vhaasteren/piccard | testing/pixels.py | 1 | 37522 | #!/usr/bin/env python
# encoding: utf-8
# vim: tabstop=4:softtabstop=4:shiftwidth=4:expandtab
"""
pixels.py
Requirements:
- numpy: pip install numpy
- matplotlib: macports, apt-get
- libstempo: pip install libstempo (optional, required for creating HDF5
files, and for non-linear timing model analysis)
Created by vhaasteren on 2013-08-06.
Copyright (c) 2013 Rutger van Haasteren
"""
from __future__ import division
import numpy as np
import math
import scipy.linalg as sl, scipy.special as ss
import matplotlib.pyplot as plt
import os, glob
import sys
import json
import tempfile
import healpy as hp
import libstempo as lt
import triangle, acor
#from . import sphericalharmonics as ang # Internal module
import sphericalharmonics as ang # Internal module (actually called anisotropygammas in Piccard)
# Should be replaced with this pixel/updated Sph work.
# Some constants used in Piccard
# For DM calculations, use this constant
# See You et al. (2007) - http://arxiv.org/abs/astro-ph/0702366
# Lee et al. (in prep.) - ...
# Units here are such that delay = DMk * DM * freq^-2 with freq in MHz
pic_DMk = 4.15e3 # Units MHz^2 cm^3 pc sec
pic_spd = 86400.0 # Seconds per day
pic_spy = 31557600.0 # Seconds per year (yr = 365.25 days, so Julian years)
pic_T0 = 53000.0 # MJD to which all HDF5 toas are referenced
pic_pc = 3.08567758e16 # Parsec in meters
pic_c = 299792458 # Speed of light in m/s
def real_sph_harm(mm, ll, phi, theta):
"""
The real-valued spherical harmonics
"""
if mm>0:
ans = (1./math.sqrt(2)) * \
(ss.sph_harm(mm, ll, phi, theta) + \
((-1)**mm) * ss.sph_harm(-mm, ll, phi, theta))
elif mm==0:
ans = ss.sph_harm(0, ll, phi, theta)
elif mm<0:
ans = (1./(math.sqrt(2)*complex(0.,1))) * \
(ss.sph_harm(-mm, ll, phi, theta) - \
((-1)**mm) * ss.sph_harm(mm, ll, phi, theta))
return ans.real
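# Note on conventions: scipy.special.sph_harm expects (order m, degree l,
# azimuthal angle, polar angle), which is why phi is passed before theta in
# the calls above.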
def signalResponse(ptapsrs, gwtheta, gwphi):
"""
Create the signal response matrix
"""
psrpos_phi = np.array([ptapsrs[ii].raj for ii in range(len(ptapsrs))])
psrpos_theta = np.array([np.pi/2.0 - ptapsrs[ii].decj for ii in range(len(ptapsrs))])
return signalResponse_fast(psrpos_theta, psrpos_phi, gwtheta, gwphi)
def signalResponse_fast(ptheta_a, pphi_a, gwtheta_a, gwphi_a):
"""
Create the signal response matrix FAST
"""
npsrs = len(ptheta_a)
# Create a meshgrid for both phi and theta directions
gwphi, pphi = np.meshgrid(gwphi_a, pphi_a)
gwtheta, ptheta = np.meshgrid(gwtheta_a, ptheta_a)
return createSignalResponse(pphi, ptheta, gwphi, gwtheta)
def createSignalResponse(pphi, ptheta, gwphi, gwtheta):
"""
Create the signal response matrix. All parameters are assumed to be of the
same dimensionality.
@param pphi: Phi of the pulsars
@param ptheta: Theta of the pulsars
@param gwphi: Phi of GW location
@param gwtheta: Theta of GW location
@return: Signal response matrix of Earth-term
"""
Fp = createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=True)
Fc = createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=False)
F = np.zeros((Fp.shape[0], 2*Fp.shape[1]))
F[:, 0::2] = Fp
F[:, 1::2] = Fc
return F
def createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=True, norm=True):
"""
Create the signal response matrix. All parameters are assumed to be of the
same dimensionality.
@param pphi: Phi of the pulsars
@param ptheta: Theta of the pulsars
@param gwphi: Phi of GW location
@param gwtheta: Theta of GW location
@param plus: Whether or not this is the plus-polarization
@return: Signal response matrix of Earth-term
"""
# Create the direction vectors. First dimension will be collapsed later
Omega = np.array([-np.sin(gwtheta)*np.cos(gwphi), \
-np.sin(gwtheta)*np.sin(gwphi), \
-np.cos(gwtheta)])
mhat = np.array([-np.sin(gwphi), np.cos(gwphi), np.zeros(gwphi.shape)])
nhat = np.array([-np.cos(gwphi)*np.cos(gwtheta), \
-np.cos(gwtheta)*np.sin(gwphi), \
np.sin(gwtheta)])
p = np.array([np.cos(pphi)*np.sin(ptheta), \
np.sin(pphi)*np.sin(ptheta), \
np.cos(ptheta)])
# There is a factor of 3/2 difference between the Hellings & Downs
# integral, and the one presented in Jenet et al. (2005; also used by Gair
# et al. 2014). This factor 'normalises' the correlation matrix, but I don't
# see why I have to pull this out of my ass here. My antennae patterns are
# correct, so does this mean our strain amplitude is re-scaled. Check this.
npixels = Omega.shape[2]
if norm:
# Add extra factor of 3/2
c = np.sqrt(1.5) / np.sqrt(npixels)
else:
c = 1.0 / np.sqrt(npixels)
# Calculate the Fplus or Fcross antenna pattern. Definitions as in Gair et
# al. (2014), with right-handed coordinate system
if plus:
# The sum over axis=0 represents an inner-product
Fsig = 0.5 * c * (np.sum(nhat * p, axis=0)**2 - np.sum(mhat * p, axis=0)**2) / \
(1 + np.sum(Omega * p, axis=0))
else:
# The sum over axis=0 represents an inner-product
Fsig = c * np.sum(mhat * p, axis=0) * np.sum(nhat * p, axis=0) / \
(1 + np.sum(Omega * p, axis=0))
return Fsig
def almFromClm(clm):
"""
Given an array of real clm values, return an array of complex alm values
Note: There is a bug in healpy for the negative m values. This function just
takes the imaginary part of the abs(m) alm index.
"""
maxl = int(np.sqrt(len(clm)))-1
nclm = len(clm)
# Construct alm from clm
nalm = hp.Alm.getsize(maxl)
alm = np.zeros((nalm), dtype=np.complex128)
clmindex = 0
for ll in range(0, maxl+1):
for mm in range(-ll, ll+1):
almindex = hp.Alm.getidx(maxl, ll, abs(mm))
if mm == 0:
alm[almindex] += clm[clmindex]
elif mm < 0:
alm[almindex] -= 1j * clm[clmindex] / np.sqrt(2)
elif mm > 0:
alm[almindex] += clm[clmindex] / np.sqrt(2)
clmindex += 1
return alm
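# Written out, the mapping above is (for the healpy a_lm with m >= 0):
#   a_{l,0}   = c_{l,0}
#   a_{l,|m|} = (c_{l,|m|} - i*c_{l,-|m|}) / sqrt(2)
# which clmFromAlm below inverts.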
def clmFromAlm(alm):
"""
Given an array of complex alm values, return an array of real clm values
Note: There is a bug in healpy for the negative m values. This function just
takes the imaginary part of the abs(m) alm index.
"""
nalm = len(alm)
maxl = int(np.sqrt(9.0 - 4.0 * (2.0-2.0*nalm))*0.5 - 1.5)
nclm = (maxl+1)**2
# Check the solution
if nalm != int(0.5 * (maxl+1) * (maxl+2)):
raise ValueError("Check numerical precision. This should not happen")
clm = np.zeros(nclm)
clmindex = 0
for ll in range(0, maxl+1):
for mm in range(-ll, ll+1):
almindex = hp.Alm.getidx(maxl, ll, abs(mm))
if mm == 0:
#alm[almindex] += clm[clmindex]
clm[clmindex] = alm[almindex].real
elif mm < 0:
#alm[almindex] -= 1j * clm[clmindex] / np.sqrt(2)
clm[clmindex] = - alm[almindex].imag * np.sqrt(2)
elif mm > 0:
#alm[almindex] += clm[clmindex] / np.sqrt(2)
clm[clmindex] = alm[almindex].real * np.sqrt(2)
clmindex += 1
return clm
def mapFromClm_fast(clm, nside):
"""
Given an array of C_{lm} values, produce a pixel-power-map (non-Nested) for
healpix pixelation with nside
@param clm: Array of C_{lm} values (inc. 0,0 element)
@param nside: Nside of the healpix pixelation
return: Healpix pixels
Use Healpix spherical harmonics for computational efficiency
"""
maxl = int(np.sqrt(len(clm)))-1
alm = almFromClm(clm)
h = hp.alm2map(alm, nside, maxl, verbose=False)
return h
def mapFromClm(clm, nside):
"""
Given an array of C_{lm} values, produce a pixel-power-map (non-Nested) for
healpix pixelation with nside
@param clm: Array of C_{lm} values (inc. 0,0 element)
@param nside: Nside of the healpix pixelation
return: Healpix pixels
"""
npixels = hp.nside2npix(nside)
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
h = np.zeros(npixels)
ind = 0
maxl = int(np.sqrt(len(clm)))-1
for ll in range(maxl+1):
for mm in range(-ll, ll+1):
h += clm[ind] * real_sph_harm(mm, ll, pixels[1], pixels[0])
ind += 1
return h
def clmFromMap_fast(h, lmax):
"""
Given a pixel map, and a maximum l-value, return the corresponding C_{lm}
values.
@param h: Sky power map
@param lmax: Up to which order we'll be expanding
return: clm values
Use Healpix spherical harmonics for computational efficiency
"""
alm = hp.sphtfunc.map2alm(h, lmax=lmax)
alm[0] = np.sum(h) * np.sqrt(4*np.pi) / len(h)
return clmFromAlm(alm)
def clmFromMap(h, lmax):
"""
Given a pixel map, and a maximum l-value, return the corresponding C_{lm}
values.
@param h: Sky power map
@param lmax: Up to which order we'll be expanding
return: clm values
"""
npixels = len(h)
nside = hp.npix2nside(npixels)
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
clm = np.zeros( (lmax+1)**2 )
ind = 0
for ll in range(lmax+1):
for mm in range(-ll, ll+1):
clm[ind] += np.sum(h * real_sph_harm(mm, ll, pixels[1], pixels[0]))
ind += 1
return clm * 4 * np.pi / npixels
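# Hypothetical round-trip check of the expansions above (exact up to
# pixelisation accuracy):
#   clm = np.zeros(16); clm[0] = 1.0
#   h = mapFromClm_fast(clm, nside=32)
#   np.allclose(clmFromMap_fast(h, lmax=3), clm)   # expected to hold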
def getCov(clm, nside, F_e):
"""
Given a vector of clm values, construct the covariance matrix
"""
# Create a sky-map (power)
# Use mapFromClm to compare to real_sph_harm. Fast uses Healpix
sh00 = mapFromClm_fast(clm, nside)
# Double the power (one for each polarization)
sh = np.array([sh00, sh00]).T.flatten()
# Create the cross-pulsar covariance
hdcov_F = np.dot(F_e * sh, F_e.T)
# The pulsar term is added (only diagonals: uncorrelated)
return hdcov_F + np.diag(np.diag(hdcov_F))
def SH_CorrBasis(psr_locs, lmax, nside=32):
"""
Calculate the correlation basis matrices using the pixel-space
transformations
@param psr_locs: Location of the pulsars [phi, theta]
@param lmax: Maximum l to go up to
@param nside: What nside to use in the pixelation [32]
"""
npsrs = len(psr_locs)
pphi = psr_locs[:,0]
ptheta = psr_locs[:,1]
# Create the pixels
npixels = hp.nside2npix(nside) # number of pixels total
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
gwtheta = pixels[0]
gwphi = pixels[1]
# Create the signal response matrix
F_e = signalResponse_fast(ptheta, pphi, gwtheta, gwphi)
# Loop over all (l,m)
basis = []
nclm = (lmax+1)**2
clmindex = 0
for ll in range(0, lmax+1):
for mm in range(-ll, ll+1):
clm = np.zeros(nclm)
clm[clmindex] = 1.0
basis.append(getCov(clm, nside, F_e))
clmindex += 1
return basis
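# Hypothetical usage, assuming psr_locs holds one [phi, theta] pair per pulsar:
#   psr_locs = np.array([[p.raj, np.pi/2 - p.decj] for p in ptapsrs])
#   basis = SH_CorrBasis(psr_locs, lmax=2, nside=32)
# This returns one (npsrs x npsrs) correlation matrix per (l, m) mode, ordered
# (0,0), (1,-1), (1,0), (1,1), (2,-2), ...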
def fourierdesignmatrix(t, nmodes, Ttot=None):
"""
Calculate the matrix of Fourier modes A, given a set of timestamps
These are sine/cosine basis vectors at evenly separated frequency bins
Mode 0: sin(f_0)
Mode 1: cos(f_0)
Mode 2: sin(f_1)
... etc
@param nmodes: The number of modes that will be included (= 2*nfreq)
@param Ttot: Total duration experiment (in case not given by t)
@return: (A, freqs), with A the 'Fourier design matrix', and freqs the associated frequencies
"""
N = t.size
A = np.zeros([N, nmodes])
T = t.max() - t.min()
if(nmodes % 2 != 0):
print "WARNING: Number of modes should be even!"
# The frequency steps
#deltaf = (N-1.0) / (N*T) # This would be orthogonal for regular sampling
if Ttot is None:
deltaf = 1.0 / T
else:
deltaf = 1.0 / Ttot
freqs1 = np.linspace(deltaf, (nmodes/2)*deltaf, nmodes/2)
freqs = np.array([freqs1, freqs1]).T.flatten()
# The cosine modes
for i in range(0, nmodes, 2):
omega = 2.0 * np.pi * freqs[i]
A[:,i] = np.cos(omega * t)
# The sine modes
for i in range(1, nmodes, 2):
omega = 2.0 * np.pi * freqs[i]
A[:,i] = np.sin(omega * t)
# This normalisation would make F unitary in the case of regular sampling
# A = A * np.sqrt(2.0/N)
return (A, freqs)
def designqsd(t):
"""
Calculate the design matrix for quadratic spindown
@param t: array of toas
"""
M = np.ones([len(t), 3])
M[:,1] = t
M[:,2] = t ** 2
return M.copy()
# Very simple pulsar class
class ptaPulsar(object):
def __init__(self, raj, decj, name, toas, residuals, toaerrs, dist=1.0, \
nfreqs=10):
self.raj = raj
self.decj = decj
self.name = name
self.toas = toas
self.residuals = residuals
self.toaerrs = toaerrs
self.pos = np.array([np.cos(self.decj)*np.cos(self.raj),
np.cos(self.decj)*np.sin(self.raj),
np.sin(self.decj)])
self.dist = dist
self.T = (np.max(self.toas) - np.min(self.toas)) * pic_spd
self.Ft, self.freqs = fourierdesignmatrix(self.toas * pic_spd, 2*nfreqs)
def getRMS(self, useResiduals=False):
"""
Calculate the weighted RMS
@param useResiduals: If true, use the residuals to calculate the RMS.
Otherwise, weigh the errorbars
"""
RMS = 0
if useResiduals:
W = 1.0 / self.toaerrs**2
SPS = np.sum(self.residuals**2 * W)
SP = np.sum(self.residuals * W)
SW = np.sum(W)
chi2 = (SPS - SP*SP/SW)
RMS = np.sqrt( (SPS - SP*SP/SW)/SW )
else:
RMS = np.sqrt(len(self.toas) / np.sum(1.0 / self.toaerrs**2))
return RMS
def readArray(partimdir, mindist=0.5, maxdist=2.0):
"""
Read in a list of ptaPulsar objects from a set of par/tim files. Pulsar
distances are randomly drawn between two values
@param partimdir: Directory of par/tim files
@param mindist: Minimum distance of pulsar
@param maxdist: Maximum distance of pulsar
@return: list of ptapsrs
"""
ptapsrs = []
curdir = os.getcwd()
os.chdir(partimdir)
for ii, infile in enumerate(glob.glob(os.path.join('./', 'J*.par') )):
filename = os.path.splitext(infile)
basename = os.path.basename(filename[0])
parfile = './' + basename +'.par'
timfile = './' + basename +'.tim'
psr = lt.tempopulsar(parfile, timfile, dofit=False)
dist = mindist + np.random.rand(1) * (maxdist - mindist)
ptapsrs.append(ptaPulsar(psr['RAJ'].val, psr['DECJ'].val, psr.name, \
psr.toas(), psr.residuals(), \
psr.toaerrs*1.0e-6, 1000*dist))
os.chdir(curdir)
return ptapsrs
def genArray(npsrs=20, Ntoas=500, toaerr=1.0e-7, T=315576000.0, mindist=0.5, maxdist=2.0):
"""
Generate a set of pulsars
@param npsrs: Number of pulsars
@param Ntoas: Number of observations per pulsar
@param toaerr: TOA uncertainty (sec.)
@param T: Length dataset (sec.)
@param mindist: Minimum distance of pulsar (kpc)
@param maxdist: Maximum distance of pulsar (kpc)
@return: list of ptapsrs
"""
ptapsrs = []
for ii in range(npsrs):
# Make a pulsar
name = "Pulsar" + str(ii)
loc = [np.random.rand(1)[0] * np.pi*2, np.arccos(2*np.random.rand(1)[0]-1)]
toas = np.linspace(0, T, Ntoas)
toaerrs = np.ones(Ntoas) * toaerr
residuals = np.random.randn(Ntoas) * toaerr
dist = mindist + np.random.rand(1) * (maxdist - mindist)
ptapsrs.append(ptaPulsar(loc[0], np.pi/2.0 - loc[1], name, toas / pic_spd,\
residuals, toaerrs, 1000*dist))
return ptapsrs
"""
with n the number of pulsars, return an nxn matrix representing the H&D
correlation matrix
"""
def hdcorrmat(ptapsrs, psrTerm=True):
""" Constructs a correlation matrix consisting of the Hellings & Downs
correlation coefficients. See Eq. (A30) of Lee, Jenet, and
Price ApJ 684:1304 (2008) for details.
@param: list of ptaPulsar (or any other markXPulsar) objects
"""
npsrs = len(ptapsrs)
raj = [ptapsrs[i].raj for i in range(npsrs)]
decj = [ptapsrs[i].decj for i in range(npsrs)]
pp = np.array([np.cos(decj)*np.cos(raj), np.cos(decj)*np.sin(raj), np.sin(decj)]).T
cosp = np.array([[np.dot(pp[i], pp[j]) for i in range(npsrs)] for j in range(npsrs)])
cosp[cosp > 1.0] = 1.0
xp = 0.5 * (1 - cosp)
old_settings = np.seterr(all='ignore')
logxp = 1.5 * xp * np.log(xp)
np.fill_diagonal(logxp, 0)
np.seterr(**old_settings)
if psrTerm:
coeff = 1.0
else:
coeff = 0.0
hdmat = logxp - 0.25 * xp + 0.5 + coeff * 0.5 * np.diag(np.ones(npsrs))
return hdmat
"""
with n the number of pulsars, return an nxn matrix representing the dipole
(ephemeris) correlation matrix
"""
def dipolecorrmat(ptapsrs):
""" Constructs a correlation matrix consisting of simple dipole correlations
"""
npsrs = len(ptapsrs)
raj = [ptapsrs[i].raj[0] for i in range(npsrs)]
decj = [ptapsrs[i].decj[0] for i in range(npsrs)]
pp = np.array([np.cos(decj)*np.cos(raj), np.cos(decj)*np.sin(raj), np.sin(decj)]).T
cosp = np.array([[np.dot(pp[i], pp[j]) for i in range(npsrs)] for j in range(npsrs)])
cosp[cosp > 1.0] = 1.0
return cosp
# The GWB general anisotropic correlations as defined in
# Mingarelli and Vecchio (submitted); Taylor and Gair (submitted)
class aniCorrelations(object):
def __init__(self, psrs=None, l=1):
self.phiarr = None # The phi pulsar position parameters
self.thetaarr = None # The theta pulsar position parameters
self.gamma_ml = None # The gamma_ml (see anisotropygammas.py)
self.priorNside = 8
self.priorNpix = 8
self.priorPix = None
self.SpHmat = None
#self.priorgridbins = 16
#self.priorphi = None
#self.priortheta = None
self.corrhd = None
self.corr = []
if psrs is not None:
# If we have a pulsars object, initialise the angular quantities
self.setmatrices(psrs, l)
def clmlength(self):
return (self.l+1)**2-1
def setmatrices(self, psrs, l):
# First set all the pulsar positions
self.phiarr = np.zeros(len(psrs))
self.thetaarr = np.zeros(len(psrs))
self.l = l
for ii in range(len(psrs)):
self.phiarr[ii] = psrs[ii].raj
self.thetaarr[ii] = np.pi/2 - psrs[ii].decj
# Create the prior-grid pixels
self.priorNside = 8
self.priorNpix = hp.nside2npix(self.priorNside)
self.priorPix = hp.pix2ang(self.priorNside, \
np.arange(self.priorNpix), nest=False)
self.SpHmat = np.zeros((self.priorNpix, self.clmlength()))
for ii in range(self.priorNpix):
cindex = 0
for ll in range(1, self.l+1):
for mm in range(-ll, ll+1):
self.SpHmat[ii, cindex] = \
real_sph_harm(mm, ll, \
self.priorPix[1][ii], \
self.priorPix[0][ii])
cindex += 1
self.corrhd = hdcorrmat(psrs)
for ll in range(1, self.l+1):
mmodes = 2*ll+1 # Number of modes for this ll
# Create the correlation matrices for this value of l
for mm in range(mmodes):
self.corr.append(np.zeros((len(psrs), len(psrs))))
for aa in range(len(psrs)):
for bb in range(aa, len(psrs)):
plus_gamma_ml = [] # gammas for this pulsar pair
neg_gamma_ml = []
gamma_ml = []
for mm in range(ll+1):
intg_gamma = ang.int_Gamma_lm(mm, ll, \
self.phiarr[aa], self.phiarr[bb], \
self.thetaarr[aa],self.thetaarr[bb])
neg_intg_gamma= (-1)**(mm) * intg_gamma # (-1)^m Gamma_ml
plus_gamma_ml.append(intg_gamma) # all gammas
neg_gamma_ml.append(neg_intg_gamma) # neg m gammas
neg_gamma_ml = neg_gamma_ml[1:] # Use 0 only once
rev_neg_gamma_ml = neg_gamma_ml[::-1] # Reverse list direction
gamma_ml = rev_neg_gamma_ml+plus_gamma_ml
# Fill the corrcur matrices for all m
mindex = len(self.corr) - mmodes # Index first m mode
for mm in range(mmodes):
m = mm - ll
self.corr[mindex+mm][aa, bb] = \
ang.real_rotated_Gammas(m, ll, \
self.phiarr[aa], self.phiarr[bb], \
self.thetaarr[aa], self.thetaarr[bb], gamma_ml)
if aa != bb:
self.corr[mindex+mm][bb, aa] = self.corr[mindex+mm][aa, bb]
def priorIndicator(self, clm):
# Check whether sum_lm c_lm * Y_lm > 0 for this combination of clm
if self.priorPix is None or self.SpHmat is None:
raise ValueError("ERROR: first define the anisotropic prior-check positions")
# Number of clm is 3 + 5 + 7 + ... (2*self.l+1)
if len(clm) != self.clmlength():
print "len(clm) = ", len(clm), "clmlength = ", self.clmlength()
raise ValueError("ERROR: len(clm) != clmlength")
clmYlm = clm * self.SpHmat
S = np.sum(clmYlm, axis=1) + 1.0
return np.all(S > 0.0)
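    # In other words: with the monopole fixed to 1, the modelled power
    # 1 + sum_lm c_lm Y_lm must be non-negative on every pixel of the coarse
    # nside=8 prior grid for the sample to be accepted.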
# Return the full correlation matrix that depends on the clm. This
# correlation matrix only needs to be multiplied with the signal amplitude
# and the time-correlations
def corrmat(self, clm):
# Number of clm is 3 + 5 + 7 + ... (2*self.l+1)
if len(clm) != self.clmlength():
raise ValueError("ERROR: len(clm) != clmlength")
corrreturn = self.corrhd.copy()
"""
np.savetxt('corrmat_0_0.txt', corrreturn)
"""
index = 0
for ll in range(1, self.l+1):
for mm in range(-ll, ll+1):
corrreturn += clm[index] * self.corr[index]
"""
if clm[index] != 0:
print "\nIndex = " + str(index) + " l, m = " + str(ll) + ',' + str(mm)
print "clm[index] = " + str(clm[index])
"""
"""
# Write the matrices to file
filename = 'corrmat_' + str(ll) + '_' + str(mm) + '.txt'
np.savetxt(filename, self.corr[index])
print "Just saved '" + filename + "'"
"""
index += 1
return corrreturn
"""
Function that calculates the earth-term gravitational-wave burst-with-memory
signal, as described in:
Seto et al., van Haasteren and Levin, Pshirkov et al., Cordes and Jenet.
parameter[0] = TOA time (sec) the burst hits the earth
parameter[1] = amplitude of the burst (strain h)
parameter[2] = azimuthal angle (rad)
parameter[3] = polar angle (rad)
parameter[4] = polarisation angle (rad)
raj = Right Ascension of the pulsar (rad)
decj = Declination of the pulsar (rad)
t = timestamps where the waveform should be returned
returns the waveform as induced timing residuals (seconds)
"""
def bwmsignal(parameters, raj, decj, t):
# The rotation matrices
rot1 = np.eye(3)
rot2 = np.eye(3)
rot3 = np.eye(3)
# Rotation along the azimuthal angle (raj source)
rot1[0,0] = np.cos(parameters[2]) ; rot1[0,1] = np.sin(parameters[2])
rot1[1,0] = -np.sin(parameters[2]) ; rot1[1,1] = np.cos(parameters[2])
# Rotation along the polar angle (decj source)
rot2[0,0] = np.sin(parameters[3]) ; rot2[0,2] = -np.cos(parameters[3])
rot2[2,0] = np.cos(parameters[3]) ; rot2[2,2] = np.sin(parameters[3])
# Rotate the bwm polarisation to match the x-direction
rot3[0,0] = np.cos(parameters[4]) ; rot3[0,1] = np.sin(parameters[4])
rot3[1,0] = -np.sin(parameters[4]) ; rot3[1,1] = np.cos(parameters[4])
# The total rotation matrix
rot = np.dot(rot1, np.dot(rot2, rot3))
# The pulsar position in Euclidian coordinates
ppos = np.zeros(3)
ppos[0] = np.cos(raj) * np.cos(decj)
ppos[1] = np.sin(raj) * np.cos(decj)
ppos[2] = np.sin(decj)
# Rotate the position of the pulsar
ppr = np.dot(rot, ppos)
# Antenna pattern
ap = 0.0
if np.abs(ppr[2]) < 1:
# Depending on definition of source position, it could be (1 - ppr[2])
ap = 0.5 * (1 + ppr[2]) * (2 * ppr[0] * ppr[0] / (1 - ppr[2]*ppr[2]) - 1)
# Define the heaviside function
heaviside = lambda x: 0.5 * (np.sign(x) + 1)
# Return the time series
return ap * (10**parameters[1]) * heaviside(t - parameters[0]) * (t - parameters[0])
def genWhite(ptapsrs):
"""
Generate white residuals, according to the TOA uncertainties
"""
for ii, psr in enumerate(ptapsrs):
psr.residuals = np.random.randn(nobs)*psr.toaerrs
def addSignal(ptapsrs, sig, reset=False):
"""
Add the signal to the residuals
"""
for ii, psr in enumerate(ptapsrs):
if reset:
psr.residuals = np.random.randn(len(psr.residuals)) * psr.toaerrs
psr.residuals += sig[:, ii]
# Generation of gravitational waves (white spectrum)
def genGWB_white(ptapsrs, amp, Si=4.33, Fmat_e=None, Fmat_p=None):
"""
Returns a signal, in the form of a npsr x ntoas matrix, not yet added to the
residuals
@param ptapsrs: The list with pulsars
@param amp: GW amplitude
@param Si: Spectral index
@param Fmat_e: Earth-term signal response
@param Fmat_p: Pulsar-term signal response
"""
# Remember, this is the decomposition: C = U * D * U.T
npsrs = len(ptapsrs)
nmodes = ptapsrs[0].Ft.shape[1]
nobs = len(ptapsrs[0].toas)
# We really use signal response matrices
if Fmat_e is None:
raise ValueError("No signal resonse given")
if Fmat_p is not None:
psrTerm = True
F = Fmat_e + Fmat_p
else:
psrTerm = False
F = Fmat_e
# Do a thin SVD
U, s, Vt = sl.svd(F, full_matrices=False)
# Generate the mode data
#xi_t = np.random.randn(nmodes) # Time-correlations
#xi_c = np.random.randn(npsrs) # Spatial-correlations
xi_full = np.random.randn(npsrs, nobs)
sig_full = np.dot(U, (s*xi_full.T).T) * amp
return sig_full.T
# Generation of gravitational waves (white spectrum)
def genGWB_fromcov_white(ptapsrs, amp, Si=4.33, cov=None):
"""
Returns a signal, in the form of a npsr x ntoas matrix, not yet added to the
residuals
@param ptapsrs: The list with pulsars
@param amp: GW amplitude
@param Si: Spectral index
@param cov: The covariance matrix we will generate from
"""
# Remember, this is the decomposition: C = U * D * U.T
npsrs = len(ptapsrs)
nmodes = ptapsrs[0].Ft.shape[1]
nobs = len(ptapsrs[0].toas)
# We really do need the covariance matrix
if cov is None:
raise ValueError("No covariance matrix given")
# Cholesky factor of the correlations
cf = sl.cholesky(cov, lower=True)
# Generate the mode data
#xi_t = np.random.randn(nmodes) # Time-correlations
#xi_c = np.random.randn(npsrs) # Spatial-correlations
xi_full = np.random.randn(npsrs, nobs)
sig_full = np.dot(cf, xi_full) * amp
return sig_full.T
# Generation of gravitational waves
def genGWB_red(ptapsrs, amp, Si=4.33, Fmat_e=None, Fmat_p=None):
"""
Returns a signal, in the form of a npsr x ntoas matrix, not yet added to the
residuals
@param ptapsrs: The list with pulsars
@param amp: GW amplitude
@param Si: Spectral index
@param Fmat_e: Earth-term signal response
@param Fmat_p: Pulsar-term signal response
"""
# Remember, this is the decomposition: C = U * D * U.T
npsrs = len(ptapsrs)
nmodes = ptapsrs[0].Ft.shape[1]
nobs = len(ptapsrs[0].toas)
# We really use signal response matrices
if Fmat_e is None:
raise ValueError("No signal resonse given")
if Fmat_p is not None:
psrTerm = True
F = Fmat_e + Fmat_p
else:
psrTerm = False
F = Fmat_e
# Do a thin SVD
U, s, Vt = sl.svd(F, full_matrices=False)
# Generate the mode data
xi_t = np.random.randn(nmodes) # Time-correlations
xi_c = np.random.randn(npsrs) # Spatial-correlations
# Generate spatial correlations
s_c = np.dot(U, (s*xi_c.T).T)
# Generate the residuals
sig = np.zeros((nobs, npsrs))
psds = np.zeros((nmodes, npsrs))
for ii, psr in enumerate(ptapsrs):
# Generate in frequency domain, and transform to time domain
freqpy = psr.freqs * pic_spy
psd = (amp**2 * pic_spy**3 / (12*np.pi*np.pi * psr.T)) * freqpy ** (-Si)
# Generate the time-correlations
s_t = np.sqrt(psd) * xi_t
# Generate the signal
psds[:, ii] = s_c[ii] * s_t
sig[:, ii] = np.dot(psr.Ft, psds[:, ii])
return (sig, psds, s_c)
# Generation of gravitational waves
def genGWB_fromcov_red(ptapsrs, amp, Si=4.33, cov=None):
"""
Returns a signal, in the form of a npsr x ntoas matrix, not yet added to the
residuals
@param ptapsrs: The list with pulsars
@param amp: GW amplitude
@param Si: Spectral index
@param cov: The covariance matrix we will generate from
"""
# Remember, this is the decomposition: C = U * D * U.T
npsrs = len(ptapsrs)
nmodes = ptapsrs[0].Ft.shape[1]
nobs = len(ptapsrs[0].toas)
# We really do need the covariance matrix
if cov is None:
raise ValueError("No covariance matrix given")
# Cholesky factor of the correlations
cf = sl.cholesky(cov, lower=True)
# Generate the mode data
xi_t = np.random.randn(nmodes) # Time-correlations
xi_c = np.random.randn(npsrs) # Spatial-correlations
#p = np.dot(U, (s*xi.T).T)
s_c = np.dot(cf, xi_c)
# Generate the residuals
sig = np.zeros((nobs, npsrs))
psds = np.zeros((nmodes, npsrs))
for ii, psr in enumerate(ptapsrs):
# Generate in frequency domain, and transform to time domain
freqpy = psr.freqs * pic_spy
psd = (amp**2 * pic_spy**3 / (12*np.pi*np.pi * psr.T)) * freqpy ** (-Si)
# Generate the time-correlations
s_t = np.sqrt(psd) * xi_t
# Generate the signal
psds[:, ii] = s_c[ii] * s_t
sig[:, ii] = np.dot(psr.Ft, psds[:, ii])
return (sig, psds, s_c)
def crossPower(ptapsrs):
"""
Calculate the cross-power according to the Jenet/Demorest method
(Noise spectra are now diagonal, so it's rather quick)
"""
npsrs = len(ptapsrs)
pairs = int(npsrs * (npsrs-1) * 0.5)
angle = np.zeros(pairs)
crosspower = np.zeros(pairs)
crosspowererr = np.zeros(pairs)
hdcoeff = np.zeros(pairs)
ii = 0
for aa in range(npsrs):
psra = ptapsrs[aa]
for bb in range(aa+1, npsrs):
psrb = ptapsrs[bb]
angle[ii] = np.arccos(np.sum(psra.pos * psrb.pos))
xp = 0.5 * (1 - np.sum(psra.pos * psrb.pos))
logxp = 1.5 * xp * np.log(xp)
hdcoeff[ii] = logxp - 0.25 * xp + 0.5
# Create 'optimal statistic' (the errs are not necessary for now)
num = np.sum(psra.residuals * psrb.residuals / \
(psra.toaerrs * psrb.toaerrs)**2)
den = np.sum(1.0 / (psra.toaerrs*psrb.toaerrs)**2)
# Crosspower and uncertainty
crosspower[ii] = num / den
crosspowererr[ii] = 1.0 / np.sqrt(den)
ii += 1
return (angle, hdcoeff, crosspower, crosspowererr)
def fitCrossPower(hdcoeff, crosspower, crosspowererr):
"""
Fit the results of the optimal statistic crossPower to the Hellings and
Downs correlation function, and return the A^2 and \delta A^2
@param hdcoeff: Array of H&D coefficients for all the pulsar pairs
@param crosspower: Array of cross-power measured for all the pulsars
@param crosspowererr: Array of the error of the cross-power measurements
@return: Value of h^2 and the uncertainty in it.
"""
hc_sqr = np.sum(crosspower*hdcoeff / (crosspowererr*crosspowererr)) / np.sum(hdcoeff*hdcoeff / (crosspowererr*crosspowererr))
hc_sqrerr = 1.0 / np.sqrt(np.sum(hdcoeff * hdcoeff / (crosspowererr * crosspowererr)))
return hc_sqr, hc_sqrerr
# The following likelihoods are for overly simplistic models, but with red signals.
# Don't use:
def mark1loglikelihood_red(pars, ptapsrs, Usig):
"""
@par pars: Parameters: GW amplitude, Spectral index, mode amps
@par ptapsrs: List of all PTA pulsars
@par Usig: Re-scaled range matrix
@return: log-likelihood
"""
npsrs = len(ptapsrs)
nobs = len(ptapsrs[0].toas)
nmodes = len(ptapsrs[0].freqs)
iia = 0
iib = iia + 2
iic = iib + npsrs
iid = iic + nmodes
Amp = 10**pars[0]
Si = pars[1]
r = pars[iib:iic] # Range amplitudes (isotropic for now)
a = pars[iic:iid] # Fourier modes of GW
# Find the correlation coefficients
c = np.dot(Usig, r)
xi2 = 0.0
ld = 0.0
for ii, psr in enumerate(ptapsrs):
xred = psr.residuals - c[ii] * np.dot(psr.Ft, a)
xi2 -= 0.5 * np.sum(xred**2 / psr.toaerrs**2)
ld -= 0.5 * np.sum(np.log(psr.toaerrs**2))
# Now the prior
freqpy = ptapsrs[0].freqs * pic_spy
psd = (Amp**2 * pic_spy**3 / (12*np.pi*np.pi * ptapsrs[0].T)) * freqpy ** (-Si)
# Subtract the correlations, too
ld -= 0.5 * np.sum(c**2)
xi2 -= 0.5 * np.sum(a**2 / psd)
ld -= 0.5 * np.sum(np.log(psd))
return xi2 + ld
def mark2loglikelihood_red(pars, ptapsrs, corrs):
"""
@par pars: Parameters: GW amplitude, Spectral index, mode amps
@par ptapsrs: List of all PTA pulsars
@par Usig: Re-scaled range matrix
@return: log-likelihood
This one uses the Clm values
"""
npsrs = len(ptapsrs)
nobs = len(ptapsrs[0].toas)
nmodes = len(ptapsrs[0].freqs)
nclm = len(pars) - 2 #- nmodes
nfreqs = nmodes / 2
lmax = np.sqrt(nclm+1)-1
iia = 0
iib = iia + 2
iic = iib + nclm
#iid = iic + nmodes
Amp = 10**pars[0]
Si = pars[1]
clm = pars[iib:iic] # Anisotropy parameters
#a = pars[iic:iid] # Fourier modes of GW
# Calculate the covariance matrix
cov = corrs.corrmat(clm)
try:
ccf = sl.cho_factor(cov)
except np.linalg.LinAlgError:
return -np.inf
cld = 2*np.sum(np.log(np.diag(ccf[0])))
covinv = sl.cho_solve(ccf, np.eye(npsrs))
# Calculate phi-inv
phiinv = np.zeros((npsrs*nmodes, npsrs*nmodes))
Sigma = np.zeros((npsrs*nmodes, npsrs*nmodes))
for ii in range(0, nmodes, 2):
sti = ii
eni = ii+npsrs*nmodes
stride = nmodes
phiinv[sti:eni:stride, sti:eni:stride] = covinv
phiinv[1+sti:eni:stride, 1+sti:eni:stride] = covinv
Sigma = phiinv.copy()
FNx = np.zeros(npsrs*nmodes)
xi2 = 0.0
ld = 0.0
for ii, psr in enumerate(ptapsrs):
xred = psr.residuals
xi2 -= 0.5 * np.sum(xred**2 / psr.toaerrs**2)
ld -= 0.5 * np.sum(np.log(psr.toaerrs**2))
Sigma[ii*nmodes:(ii+1)*nmodes, ii*nmodes:(ii+1)*nmodes] += np.dot(psr.Ft.T / (psr.toaerrs**2), psr.Ft)
FNx[ii*nmodes:(ii+1)*nmodes] = np.dot(psr.Ft.T, xred/(psr.toaerrs**2))
try:
scf = sl.cho_factor(Sigma)
except np.linalg.LinAlgError:
return -np.inf
sld = 2*np.sum(np.log(np.diag(scf[0])))
SFNx = sl.cho_solve(scf, FNx)
xi2 += 0.5 * np.dot(FNx, SFNx)
ld -= 0.5*npsrs*nmodes*cld
ld -= 0.5*sld
return xi2 + ld
def logprior_red(amps, amin=None, amax=None):
retval = 0.0
if amin is not None:
if not np.all(amps > amin):
retval = -np.inf
if amax is not None:
if not np.all(amps < amax):
retval = -np.inf
return retval
if __name__ == "__main__":
# Do some stuff
print "Hello, world!"
| gpl-3.0 |
choderalab/TrustButVerify | trustbutverify/mixture_system.py | 1 | 7995 | import tempfile
import numpy as np
import os
import mdtraj as md
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
from .target import Target
from .simulation_parameters import *
from .cirpy import resolve
import utils
from protein_system import System
import gaff2xml
import itertools
from pymbar import timeseries as ts
import pandas as pd
N_STEPS_MIXTURES = 500000 # 0.5 ns (at a time)
N_EQUIL_STEPS_MIXTURES = 5000000 # 5ns
OUTPUT_FREQUENCY_MIXTURES = 10000 # 10ps
OUTPUT_DATA_FREQUENCY_MIXTURES = 250 # 0.25ps
STD_ERROR_TOLERANCE = 0.0002 # g/mL
TIMESTEP = 1.0 * u.femtoseconds
class MixtureSystem(System):
def __init__(self, cas_strings, n_monomers, temperature, pressure=PRESSURE, output_frequency=OUTPUT_FREQUENCY_MIXTURES, output_data_frequency=OUTPUT_DATA_FREQUENCY_MIXTURES, n_steps=N_STEPS_MIXTURES, equil_output_frequency=OUTPUT_FREQUENCY_MIXTURES, stderr_tolerance = STD_ERROR_TOLERANCE, **kwargs):
super(MixtureSystem, self).__init__(temperature=temperature, pressure=pressure, output_frequency=output_frequency, n_steps=n_steps, equil_output_frequency=equil_output_frequency, **kwargs)
self._main_dir = os.getcwd()
self.cas_strings = cas_strings
self.n_monomers = n_monomers
identifier = list(itertools.chain(cas_strings, [str(n) for n in n_monomers], [str(temperature).split(' ')[0]]))
self._target_name = '_'.join(identifier)
self.output_data_frequency = output_data_frequency
self.stderr_tolerance = stderr_tolerance
self.ran_equilibrate = False
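    # Hypothetical usage (the CAS strings here are ethanol and water):
    #   box = MixtureSystem(["64-17-5", "7732-18-5"], [100, 400], 300 * u.kelvin)
    #   box.build(); box.equilibrate(); box.production()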
def build(self):
utils.make_path('monomers/')
utils.make_path('boxes/')
utils.make_path('ffxml/')
self.monomer_pdb_filenames = ["monomers/"+string+".pdb" for string in self.cas_strings]
self.box_pdb_filename = "boxes/" + self.identifier + ".pdb"
self.ffxml_filename = "ffxml/" + '_'.join(self.cas_strings) + ".xml"
utils.make_path(self.box_pdb_filename)
rungaff = False
if not os.path.exists(self.ffxml_filename):
rungaff = True
if not os.path.exists(self.box_pdb_filename):
for filename in self.monomer_pdb_filenames:
if not os.path.exists(filename):
rungaff = True
if rungaff:
self.smiles_strings = []
for mlc in self.cas_strings:
self.smiles_strings.append(resolve(mlc, 'smiles'))
oemlcs = []
with gaff2xml.utils.enter_temp_directory(): # Avoid dumping 50 antechamber files in local directory.
for smiles_string in self.smiles_strings:
m = gaff2xml.openeye.smiles_to_oemol(smiles_string)
m = gaff2xml.openeye.get_charges(m, strictStereo=False, keep_confs=1)
oemlcs.append(m)
ligand_trajectories, ffxml = gaff2xml.openeye.oemols_to_ffxml(oemlcs)
if not os.path.exists(self.ffxml_filename):
outfile = open(self.ffxml_filename, 'w')
outfile.write(ffxml.read())
outfile.close()
ffxml.seek(0)
for k, ligand_traj in enumerate(ligand_trajectories):
pdb_filename = self.monomer_pdb_filenames[k]
if not os.path.exists(pdb_filename):
ligand_traj.save(pdb_filename)
self.ffxml = app.ForceField(self.ffxml_filename)
if not os.path.exists(self.box_pdb_filename):
self.packed_trj = gaff2xml.packmol.pack_box(self.monomer_pdb_filenames, self.n_monomers)
self.packed_trj.save(self.box_pdb_filename)
else:
self.packed_trj = md.load(self.box_pdb_filename)
def equilibrate(self):
self.ran_equilibrate = True
utils.make_path('equil/')
self.equil_dcd_filename = "equil/"+self.identifier +"_equil.dcd"
self.equil_pdb_filename = "equil/"+self.identifier +"_equil.pdb"
utils.make_path(self.equil_pdb_filename)
if os.path.exists(self.equil_pdb_filename):
return
positions = self.packed_trj.openmm_positions(0)
topology = self.packed_trj.top.to_openmm()
topology.setUnitCellDimensions(mm.Vec3(*self.packed_trj.unitcell_lengths[0]) * u.nanometer)
ff = self.ffxml
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=self.cutoff, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(self.temperature, self.equil_friction, self.equil_timestep)
system.addForce(mm.MonteCarloBarostat(self.pressure, self.temperature, self.barostat_frequency))
simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(positions)
print('Minimizing.')
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(self.temperature)
print('Equilibrating.')
simulation.reporters.append(app.DCDReporter(self.equil_dcd_filename, self.equil_output_frequency))
simulation.step(self.n_equil_steps)
# Re-write a better PDB with correct box sizes.
traj = md.load(self.equil_dcd_filename, top=self.box_pdb_filename)[-1]
traj.save(self.equil_pdb_filename)
def production(self):
utils.make_path('production/')
self.production_dcd_filename = "production/"+self.identifier +"_production.dcd"
self.production_pdb_filename = "production/"+self.identifier +"_production.pdb"
self.production_data_filename = "production/"+self.identifier +"_production.csv"
utils.make_path(self.production_dcd_filename)
if os.path.exists(self.production_pdb_filename):
return
if self.ran_equilibrate:
pdb = app.PDBFile(self.equil_pdb_filename)
topology = pdb.topology
positions = pdb.positions
else:
positions = self.packed_trj.openmm_positions(0)
topology = self.packed_trj.top.to_openmm()
topology.setUnitCellDimensions(mm.Vec3(*self.packed_trj.unitcell_lengths[0]) * u.nanometer)
ff = self.ffxml
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=self.cutoff, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(self.temperature, self.friction, self.timestep)
system.addForce(mm.MonteCarloBarostat(self.pressure, self.temperature, self.barostat_frequency))
simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(positions)
if not self.ran_equilibrate:
print('Minimizing.')
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(self.temperature)
print('Production.')
simulation.reporters.append(app.DCDReporter(self.production_dcd_filename, self.output_frequency))
simulation.reporters.append(app.StateDataReporter(self.production_data_filename, self.output_data_frequency, step=True, potentialEnergy=True, temperature=True, density=True))
converged = False
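        # Run production in chunks of n_steps; after each chunk, estimate the
        # equilibrated portion of the density time series with
        # ts.detectEquilibration (presumably pymbar's timeseries module) and
        # stop once the standard error of the mean density, std / sqrt(Neff),
        # falls below stderr_tolerance.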
while not converged:
simulation.step(self.n_steps)
d = pd.read_csv(self.production_data_filename, names=["step", "U", "Temperature", "Density"], skiprows=1)
density_ts = np.array(d.Density)
[t0, g, Neff] = ts.detectEquilibration(density_ts, nskip=1000)
density_ts = density_ts[t0:]
density_mean_stderr = density_ts.std() / np.sqrt(Neff)
if density_mean_stderr < self.stderr_tolerance:
converged = True
        del simulation
if self.ran_equilibrate:
traj = md.load(self.production_dcd_filename, top=self.equil_pdb_filename)[-1]
else:
traj = md.load(self.production_dcd_filename, top=self.box_pdb_filename)[-1]
traj.save(self.production_pdb_filename)
| gpl-2.0 |
imitrichev/cantera | interfaces/cython/cantera/examples/onedim/diffusion_flame_batch.py | 1 | 8862 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2014 Thomas Fiala ([email protected]), Lehrstuhl für
# Thermodynamik, TU München. For conditions of distribution and use, see
# copyright notice in License.txt.
#
###############################################################################
"""
This example creates two batches of counterflow diffusion flame simulations.
The first batch computes counterflow flames at increasing pressure, the second
at increasing strain rates.
The tutorial makes use of the scaling rules derived by Fiala and Sattelmayer
(doi:10.1155/2014/484372). Please refer to this publication for a detailed
explanation. Also, please don't forget to cite it if you make use of it.
This example can, for instance, be used to iterate a counterflow diffusion flame
to an otherwise hard-to-reach pressure and strain rate, or to create the basis
for a flamelet table.
"""
import cantera as ct
import numpy as np
import os
# Create directory for output data files
data_directory = 'diffusion_flame_batch_data/'
if not os.path.exists(data_directory):
os.makedirs(data_directory)
# Set refinement: False for fast simulations, True for smoother curves
refine = True
# PART 1: INITIALIZATION
# Set up an initial hydrogen-oxygen counterflow flame at 1 bar and low strain
# rate (maximum axial velocity gradient = 2414 1/s)
# Initial grid: 18mm wide, 21 points
reaction_mechanism = 'h2o2.xml'
gas = ct.Solution(reaction_mechanism)
initial_grid = np.linspace(0.0, 18e-3, 21)
f = ct.CounterflowDiffusionFlame(gas, initial_grid)
# Define the operating pressure and boundary conditions
f.P = 1.e5 # 1 bar
f.fuel_inlet.mdot = 0.5 # kg/m^2/s
f.fuel_inlet.X = 'H2:1'
f.fuel_inlet.T = 300 # K
f.oxidizer_inlet.mdot = 3.0 # kg/m^2/s
f.oxidizer_inlet.X = 'O2:1'
f.oxidizer_inlet.T = 300 # K
# Define relative and absolute error tolerances
f.flame.set_steady_tolerances(default=[1.0e-5, 1.0e-12])
f.flame.set_transient_tolerances(default=[5.0e-4, 1.0e-11])
# Set refinement parameters, if used
f.set_refine_criteria(ratio=3.0, slope=0.1, curve=0.2, prune=0.03)
f.set_grid_min(1e-20)
# Define a limit for the maximum temperature below which the flame is
# considered to be extinguished and the computation is aborted
# This increases the speed if refinement is enabled
temperature_limit_extinction = 900 # K
def interrupt_extinction(t):
if np.max(f.T) < temperature_limit_extinction:
raise Exception('Flame extinguished')
return 0.
f.set_interrupt(interrupt_extinction)
# Initialize and solve
print('Creating the initial solution')
f.solve(loglevel=0, refine_grid=refine)
# Save to data directory
file_name = 'initial_solution.xml'
f.save(data_directory + file_name, name='solution',
description='Cantera version ' + ct.__version__ +
', reaction mechanism ' + reaction_mechanism)
# PART 2: BATCH PRESSURE LOOP
# Compute counterflow diffusion flames over a range of pressures
# Arbitrarily define a pressure range (in bar)
p_range = np.round(np.logspace(0, 2, 50), decimals=1)
# Exponents for the initial solution variation with changes in pressure, taken
# from Fiala and Sattelmayer (2014). The exponents are adjusted such that the
# strain rate increases proportionally to p^(3/2), which results in flames that
# are similar with respect to the extinction strain rate.
exp_d_p = -5. / 4.
exp_u_p = 1. / 4.
exp_V_p = 3. / 2.
exp_lam_p = 4.
exp_mdot_p = 5. / 4.
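# For illustration: with these exponents, a pressure doubling
# (rel_pressure_increase = 2) rescales the initial guess roughly as
# grid width * 2**(-5/4) ~ 0.42, mass fluxes * 2**(5/4) ~ 2.38,
# u * 2**(1/4) ~ 1.19, V * 2**(3/2) ~ 2.83 and lambda * 2**4 = 16.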
# The variable p_previous (in bar) is used for the pressure scaling
p_previous = f.P / 1.e5
# Iterate over the pressure range
for p in p_range:
print('pressure = {0} bar'.format(p))
# set new pressure
f.P = p * 1.e5
# Create an initial guess based on the previous solution
rel_pressure_increase = p / p_previous
# Update grid
f.flame.grid *= rel_pressure_increase ** exp_d_p
normalized_grid = f.grid / (f.grid[-1] - f.grid[0])
# Update mass fluxes
f.fuel_inlet.mdot *= rel_pressure_increase ** exp_mdot_p
f.oxidizer_inlet.mdot *= rel_pressure_increase ** exp_mdot_p
# Update velocities
f.set_profile('u', normalized_grid,
f.u * rel_pressure_increase ** exp_u_p)
f.set_profile('V', normalized_grid,
f.V * rel_pressure_increase ** exp_V_p)
# Update pressure curvature
f.set_profile('lambda', normalized_grid,
f.L * rel_pressure_increase ** exp_lam_p)
try:
# Try solving the flame
f.solve(loglevel=0, refine_grid=refine)
file_name = 'pressure_loop_' + format(p, '05.1f') + '.xml'
f.save(data_directory + file_name, name='solution', loglevel=1,
description='Cantera version ' + ct.__version__ +
', reaction mechanism ' + reaction_mechanism)
p_previous = p
except Exception as e:
print('Error occurred while solving:', e, 'Try next pressure level')
# If solution failed: Restore the last successful solution and continue
f.restore(filename=data_directory + file_name, name='solution',
loglevel=0)
# PART 3: STRAIN RATE LOOP
# Compute counterflow diffusion flames at increasing strain rates at 1 bar
# The strain rate is assumed to increase by 25% in each step until the flame is
# extinguished
strain_factor = 1.25
# Exponents for the initial solution variation with changes in strain rate
# Taken from Fiala and Sattelmayer (2014)
exp_d_a = - 1. / 2.
exp_u_a = 1. / 2.
exp_V_a = 1.
exp_lam_a = 2.
exp_mdot_a = 1. / 2.
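# For illustration: each 25 % step (strain_factor = 1.25) shrinks the grid by
# 1.25**(-1/2) ~ 0.894 and scales the mass fluxes and u by 1.25**(1/2) ~ 1.118,
# V by 1.25 and lambda by 1.25**2 = 1.5625.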
# Restore initial solution
file_name = 'initial_solution.xml'
f.restore(filename=data_directory + file_name, name='solution', loglevel=0)
# Counter to identify the loop
n = 0
# Do the strain rate loop
while np.max(f.T) > temperature_limit_extinction:
n += 1
print('strain rate iteration', n)
# Create an initial guess based on the previous solution
# Update grid
f.flame.grid *= strain_factor ** exp_d_a
normalized_grid = f.grid / (f.grid[-1] - f.grid[0])
# Update mass fluxes
f.fuel_inlet.mdot *= strain_factor ** exp_mdot_a
f.oxidizer_inlet.mdot *= strain_factor ** exp_mdot_a
# Update velocities
f.set_profile('u', normalized_grid, f.u * strain_factor ** exp_u_a)
f.set_profile('V', normalized_grid, f.V * strain_factor ** exp_V_a)
# Update pressure curvature
f.set_profile('lambda', normalized_grid, f.L * strain_factor ** exp_lam_a)
try:
# Try solving the flame
f.solve(loglevel=0, refine_grid=refine)
file_name = 'strain_loop_' + format(n, '02d') + '.xml'
f.save(data_directory + file_name, name='solution', loglevel=1,
description='Cantera version ' + ct.__version__ +
', reaction mechanism ' + reaction_mechanism)
except Exception as e:
if e.args[0] == 'Flame extinguished':
print('Flame extinguished')
else:
print('Error occurred while solving:', e)
break
# PART 4: PLOT SOME FIGURES
import matplotlib.pyplot as plt
fig1 = plt.figure()
fig2 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
ax2 = fig2.add_subplot(1,1,1)
p_selected = p_range[::7]
for p in p_selected:
file_name = 'pressure_loop_{0:05.1f}.xml'.format(p)
f.restore(filename=data_directory + file_name, name='solution', loglevel=0)
# Plot the temperature profiles for selected pressures
ax1.plot(f.grid / f.grid[-1], f.T, label='{0:05.1f} bar'.format(p))
# Plot the axial velocity profiles (normalized by the fuel inlet velocity)
# for selected pressures
ax2.plot(f.grid / f.grid[-1], f.u / f.u[0],
label='{0:05.1f} bar'.format(p))
ax1.legend(loc=0)
ax1.set_xlabel(r'$z/z_{max}$')
ax1.set_ylabel(r'$T$ [K]')
fig1.savefig(data_directory + 'figure_T_p.png')
ax2.legend(loc=0)
ax2.set_xlabel(r'$z/z_{max}$')
ax2.set_ylabel(r'$u/u_f$')
fig2.savefig(data_directory + 'figure_u_p.png')
fig3 = plt.figure()
fig4 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
ax4 = fig4.add_subplot(1,1,1)
n_selected = range(1, n, 5)
for n in n_selected:
file_name = 'strain_loop_{0:02d}.xml'.format(n)
f.restore(filename=data_directory + file_name, name='solution', loglevel=0)
a_max = f.strain_rate('max') # the maximum axial strain rate
# Plot the temperature profiles for the strain rate loop (selected)
ax3.plot(f.grid / f.grid[-1], f.T, label='{0:.2e} 1/s'.format(a_max))
# Plot the axial velocity profiles (normalized by the fuel inlet velocity)
# for the strain rate loop (selected)
ax4.plot(f.grid / f.grid[-1], f.u / f.u[0],
label=format(a_max, '.2e') + ' 1/s')
ax3.legend(loc=0)
ax3.set_xlabel(r'$z/z_{max}$')
ax3.set_ylabel(r'$T$ [K]')
fig3.savefig(data_directory + 'figure_T_a.png')
ax4.legend(loc=0)
ax4.set_xlabel(r'$z/z_{max}$')
ax4.set_ylabel(r'$u/u_f$')
fig4.savefig(data_directory + 'figure_u_a.png')
| bsd-3-clause |
suttond/MODOI | ase/utils/sphinx.py | 2 | 6976 | from __future__ import print_function
import os
import traceback
import warnings
from os.path import join
from stat import ST_MTIME
from docutils import nodes
from docutils.parsers.rst.roles import set_classes
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from ase.utils import exec_
def mol_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Typeset a molecule name: each '_' marks the following character as a
    subscript, e.g. 'CH_4' renders as CH with a subscript 4."""
    n = []
t = ''
while text:
if text[0] == '_':
n.append(nodes.Text(t))
t = ''
n.append(nodes.subscript(text=text[1]))
text = text[2:]
else:
t += text[0]
text = text[1:]
n.append(nodes.Text(t))
return n, []
def svn_role_tmpl(urlroot,
role,
rawtext, text, lineno, inliner, options={}, content=[]):
if text[-1] == '>':
i = text.index('<')
name = text[:i - 1]
text = text[i + 1:-1]
else:
name = text
if name[0] == '~':
name = name.split('/')[-1]
text = text[1:]
if '?' in name:
name = name[:name.index('?')]
ref = urlroot + text
set_classes(options)
node = nodes.reference(rawtext, name, refuri=ref,
**options)
return [node], []
def trac_role_tmpl(urlroot,
role,
rawtext, text, lineno, inliner, options={}, content=[]):
if text[-1] == '>':
i = text.index('<')
name = text[:i - 1]
text = text[i + 1:-1]
else:
name = text
if name[0] == '~':
name = name.split('/')[-1]
text = text[1:]
if '?' in name:
name = name[:name.index('?')]
ref = urlroot + text
set_classes(options)
node = nodes.reference(rawtext, name, refuri=ref,
**options)
return [node], []
def epydoc_role_tmpl(package_name, urlroot,
role,
rawtext, text, lineno, inliner, options={}, content=[]):
name = None
if text[-1] == '>':
i = text.index('<')
name = text[:i - 1]
text = text[i + 1:-1]
components = text.split('.')
if components[0] != package_name:
components.insert(0, package_name)
if name is None:
name = components[-1]
try:
module = None
for n in range(2, len(components) + 1):
module = __import__('.'.join(components[:n]))
except ImportError:
if module is None:
print('epydoc: could not process: %s' % str(components))
raise
for component in components[1:n]:
module = getattr(module, component)
ref = '.'.join(components[:n])
if isinstance(module, type):
ref += '-class.html'
else:
ref += '-module.html'
if n < len(components):
ref += '#' + components[-1]
else:
ref = '.'.join(components) + '-module.html'
ref = urlroot + ref
set_classes(options)
node = nodes.reference(rawtext, name,
refuri=ref,
**options)
return [node], []
def creates():
"""Generator for Python scripts and their output filenames."""
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.py'):
path = join(dirpath, filename)
lines = open(path).readlines()
if len(lines) == 0:
continue
line = lines[0]
if 'coding: utf-8' in line:
line = lines[1]
if line.startswith('# creates:'):
yield dirpath, filename, [file.rstrip(',')
for file in line.split()[2:]]
if '.svn' in dirnames:
dirnames.remove('.svn')
if 'build' in dirnames:
dirnames.remove('build')
def create_png_files():
errcode = os.system('povray -h 2> /dev/null')
if errcode:
warnings.warn('No POVRAY!')
# Replace write_pov with write_png:
from ase.io import pov
from ase.io.png import write_png
def write_pov(filename, atoms, run_povray=False, **parameters):
p = {}
for key in ['rotation', 'show_unit_cell', 'radii',
'bbox', 'colors', 'scale']:
if key in parameters:
p[key] = parameters[key]
write_png(filename[:-3] + 'png', atoms, **p)
pov.write_pov = write_pov
olddir = os.getcwd()
for dir, pyname, outnames in creates():
path = join(dir, pyname)
t0 = os.stat(path)[ST_MTIME]
run = False
for outname in outnames:
try:
t = os.stat(join(dir, outname))[ST_MTIME]
except OSError:
run = True
break
else:
if t < t0:
run = True
break
if run:
print('running:', path)
os.chdir(dir)
plt.figure()
try:
exec_(compile(open(pyname).read(), pyname, 'exec'), {})
except KeyboardInterrupt:
return
except:
traceback.print_exc()
finally:
os.chdir(olddir)
plt.close()
for outname in outnames:
print(dir, outname)
def clean():
"""Remove all generated files."""
for dir, pyname, outnames in creates():
for outname in outnames:
if os.path.isfile(os.path.join(dir, outname)):
os.remove(os.path.join(dir, outname))
def visual_inspection():
"""Manually inspect generated files."""
import subprocess
images = []
text = []
pdf = []
for dir, pyname, outnames in creates():
for outname in outnames:
path = os.path.join(dir, outname)
ext = path.rsplit('.', 1)[1]
if ext == 'pdf':
pdf.append(path)
elif ext in ['csv', 'txt', 'out']:
text.append(path)
else:
images.append(path)
subprocess.call(['eog'] + images)
subprocess.call(['evince'] + pdf)
subprocess.call(['more'] + text)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Process generated files.')
parser.add_argument('command', nargs='?', default='list',
choices=['list', 'inspect', 'clean'])
args = parser.parse_args()
if args.command == 'clean':
clean()
elif args.command == 'list':
for dir, pyname, outnames in creates():
for outname in outnames:
print(os.path.join(dir, outname))
else:
visual_inspection()
| lgpl-3.0 |
buguen/pylayers | pylayers/antprop/examples/ex_antenna5.py | 3 | 2472 | from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluates the antenna vsh coefficients with a downsampling factor of 2
4 : evaluates the relative error of reconstruction (vsh3) for various values of order l
5 : displays the results
"""
filename = 'S1R1.mat'
A = Antenna(filename,'ant/UWBAN/Matfile')
B = Antenna(filename,'ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
B.Ftheta = B.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
B.Fphi = B.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
A = vsh(A,dsf)
B = vsh(B,dsf)
tn = []
tet = []
tep = []
te = []
tmse = []
l = 20
A.C.s1tos2(l)
B.C.s1tos2(l)
u = np.shape(A.C.Br.s2)
Nf = u[0]
Nk = u[1]
tr = np.arange(2,Nk)
A.C.s2tos3_new(Nk)
B.C.s2tos3(1e-6)
UA = np.sum(A.C.Cr.s3*np.conj(A.C.Cr.s3),axis=0)
UB = np.sum(B.C.Cr.s3*np.conj(B.C.Cr.s3),axis=0)
ua = A.C.Cr.ind3
ub = B.C.Cr.ind3
da ={}
db ={}
for k in range(Nk):
da[str(ua[k])]=UA[k]
db[str(ub[k])]=UB[k]
tu = []
for t in sort(da.keys()):
tu.append(da[t] - db[t])
errelTha,errelPha,errela = A.errel(l,20,dsf,typ='s3')
errelThb,errelPhb,errelb = B.errel(l,20,dsf,typ='s3')
print "a: nok",errela,errelPha,errelTha
print "b: ok ",errelb,errelPhb,errelThb
for r in tr:
E = A.C.s2tos3_new(r)
errelTh,errelPh,errel = A.errel(l,20,dsf,typ='s3')
print 'r : ',r,errel,E
tet.append(errelTh)
tep.append(errelPh)
te.append(errel)
#
line1 = plt.plot(array(tr),10*log10(array(tep)),'b')
line2 = plt.plot(array(tr),10*log10(array(tet)),'r')
line3 = plt.plot(array(tr),10*log10(array(te)),'g')
#
plt.xlabel('order l')
plt.ylabel(u'$\epsilon_{rel}$ (dB)',fontsize=18)
plt.title('Evolution of reconstruction relative error wrt order')
plt.legend((u'$\epsilon_{rel}^{\phi}$',u'$\epsilon_{rel}^{\\theta}$',u'$\epsilon_{rel}^{total}$'))
plt.legend((line1,line2,line3),('a','b','c'))
plt.show()
plt.legend(('errel_phi','errel_theta','errel'))
| lgpl-3.0 |
elkingtonmcb/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
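        # For instance, with n_samples1 = 3 and n_neighbors = 2 the index
        # pointer is [0, 2, 4, 6]: row i of the resulting CSR graph holds the
        # two entries A_data[2*i:2*i + 2] at the columns given by the matching
        # entries of the (ravelled) A_ind.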
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
rhyolight/nupic.research | projects/feedback/feedback_sequences_additional.py | 7 | 24229 |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file runs a number of experiments testing the effectiveness of feedback
with noisy inputs.
"""
import os
from copy import deepcopy
import numpy
import cPickle
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats
matplotlib.rcParams['pdf.fonttype'] = 42
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
import feedback_experiment
from feedback_experiment import FeedbackExperiment
def convertSequenceMachineSequence(generatedSequences):
"""
Convert a sequence from the SequenceMachine into a list of sequences, such
that each sequence is a list of set of SDRs.
"""
sequenceList = []
currentSequence = []
for s in generatedSequences:
if s is None:
sequenceList.append(currentSequence)
currentSequence = []
else:
currentSequence.append(s)
return sequenceList
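# For example, [A, B, None, C, D, None], where A..D are sets of active cell
# indices, is converted to [[A, B], [C, D]]: the None entries emitted by the
# SequenceMachine act as end-of-sequence markers.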
def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2,
sharedRange=None, seed=42):
"""
Generate high order sequences using SequenceMachine
"""
# Lots of room for noise sdrs
patternAlphabetSize = 10*(sequenceLength * sequenceCount)
patternMachine = PatternMachine(n, w, patternAlphabetSize, seed)
sequenceMachine = SequenceMachine(patternMachine, seed)
numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength,
sharedRange=sharedRange )
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
return sequenceMachine, generatedSequences, numbers
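# Note: sharedRange is forwarded to SequenceMachine.generateNumbers; it is
# intended to embed a common subsequence at those positions of every sequence
# (cf. sharedRange=(5, 24) in __main__ below), so the sequences overlap in the
# middle and differ only near their ends.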
def sparsenRange(sequenceMachine, sequences, startRange, endRange, probaZero):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p < endRange and p >= startRange:
newsdr = numpy.array(list(sdr))
keep = numpy.random.rand(len(newsdr)) > probaZero
newsdr = newsdr[keep==True]
newSequence.append(set(newsdr))
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def crossSequences(sequenceMachine, sequences, pos):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= pos:
newSequence.append(sequences[(numseq +1) % len(sequences)][p])
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def addTemporalNoise(sequenceMachine, sequences, noiseStart, noiseEnd, noiseProba):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= noiseStart and p < noiseEnd:
newsdr = patternMachine.addNoise(sdr, noiseProba)
newSequence.append(newsdr)
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def addPerturbation(sequenceMachine, sequences, noiseType, pos, number=1):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= pos and p < pos+number:
if noiseType == "skip":
pass
elif noiseType == "replace":
newsdr = patternMachine.addNoise(sdr, 1.0)
newSequence.append(newsdr)
elif noiseType == "repeat":
newSequence.append(s[p-1])
else:
raise("Unrecognized Noise Type!")
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def runInference(exp, sequences, enableFeedback=True, apicalTiebreak=True,
apicalModulationBasalThreshold=True, inertia=True):
"""
Run inference on this set of sequences and compute error
"""
if enableFeedback:
print "Feedback enabled: "
else:
print "Feedback disabled: "
error = 0
activityTraces = []
responses = []
for i,sequence in enumerate(sequences):
(avgActiveCells, avgPredictedActiveCells, activityTrace, responsesThisSeq) = exp.infer(
sequence, sequenceNumber=i, enableFeedback=enableFeedback, apicalTiebreak=apicalTiebreak,
apicalModulationBasalThreshold=apicalModulationBasalThreshold, inertia=inertia)
error += avgActiveCells
activityTraces.append(activityTrace)
responses.append(responsesThisSeq)
print " "
error /= len(sequences)
print "Average error = ",error
return error, activityTraces, responses
def runExp(noiseProba, numSequences, nbSeeds, noiseType, sequenceLen, sharedRange, noiseRange, whichPlot, plotTitle):
allowedNoises = ("skip", "replace", "repeat", "crossover", "pollute")
if noiseType not in allowedNoises:
    raise RuntimeError("noiseType must be one of the following: " + ", ".join(allowedNoises))
meanErrsFB = []; meanErrsNoFB = []; meanErrsNoNoise = []
stdErrsFB = []; stdErrsNoFB = []; stdErrsNoNoise = []
meanPerfsFB = []; stdPerfsFB = []
meanPerfsNoFB = []; stdPerfsNoFB = []
stdsFB = []
stdsNoFB=[]
activitiesFB=[]; activitiesNoFB=[]
diffsFB = []
diffsNoFB = []
overlapsFBL2=[]; overlapsNoFBL2=[]
overlapsFBL2Next=[]; overlapsNoFBL2Next=[]
overlapsFBL4=[]; overlapsNoFBL4=[]
overlapsFBL4Next=[]; overlapsNoFBL4Next=[]
corrsPredCorrectFBL4=[]; corrsPredCorrectNoFBL4=[]
diffsFBL4Pred=[]; diffsNoFBL4Pred=[]
diffsFBL4PredNext=[]; diffsNoFBL4PredNext=[]
diffsFBL2=[]; diffsNoFBL2=[]
diffsFBL2Next=[]; diffsNoFBL2Next=[]
diffsNoAT = []; overlapsNoATL2=[]; overlapsNoATL2Next=[]; overlapsNoATL4=[]
overlapsNoATL4Next=[]
corrsPredCorrectNoATL4=[]; diffsNoATL4Pred=[]; diffsNoATL4PredNext=[]
diffsNoATL2=[]; diffsNoATL2Next=[]
diffsNoAM = []; overlapsNoAML2=[]; overlapsNoAML2Next=[]; overlapsNoAML4=[]
overlapsNoAML4Next=[]
corrsPredCorrectNoAML4=[]; diffsNoAML4Pred=[]; diffsNoAML4PredNext=[]
diffsNoAML2=[]; diffsNoAML2Next=[]
diffsNoIN = []; overlapsNoINL2=[]; overlapsNoINL2Next=[]; overlapsNoINL4=[]
overlapsNoINL4Next=[]
corrsPredCorrectNoINL4=[]; diffsNoINL4Pred=[]; diffsNoINL4PredNext=[]
diffsNoINL2=[]; diffsNoINL2Next=[]
errorsFB=[]; errorsNoFB=[]; errorsNoNoise=[]
perfsFB = []; perfsNoFB = []
#for probaZero in probaZeros:
seed = 42
for seedx in range(nbSeeds):
seed = seedx + 123
    profile = False
L4Overrides = {"cellsPerColumn": 8}
numpy.random.seed(seed)
# Create the sequences and arrays
print "Generating sequences..."
sequenceMachine, generatedSequences, numbers = generateSequences(
sequenceLength=sequenceLen, sequenceCount=numSequences,
sharedRange=sharedRange,
seed=seed)
sequences = convertSequenceMachineSequence(generatedSequences)
noisySequences = deepcopy(sequences)
# Apply noise to sequences
noisySequences = addTemporalNoise(sequenceMachine, noisySequences,
noiseStart=noiseRange[0], noiseEnd=noiseRange[1],
noiseProba=noiseProba)
# *In addition* to this, add crossover or single-point noise
if noiseType == "crossover":
noisySequences = crossSequences(sequenceMachine, noisySequences,
pos=sequenceLen/2)
elif noiseType in ("repeat", "replace", "skip"):
noisySequences = addPerturbation(sequenceMachine, noisySequences,
noiseType=noiseType, pos=sequenceLen/2, number=1)
inferenceErrors = []
#Setup experiment and train the network on sequences
print "Learning sequences..."
exp = FeedbackExperiment(
numLearningPasses= 2*sequenceLen, # To handle high order sequences
seed=seed,
L4Overrides=L4Overrides,
)
exp.learnSequences(sequences)
print "Number of columns in exp: ", exp.numColumns
print "Sequences learned!"
# Run inference without any noise. This becomes our baseline error
standardError, activityNoNoise, responsesNoNoise = runInference(exp, sequences)
inferenceErrors.append(standardError)
runError, activityFB, responsesFB = runInference(
exp, noisySequences, enableFeedback=True)
runError, activityNoFB, responsesNoFB = runInference(
exp, noisySequences, enableFeedback=False)
runError, activityNoAT, responsesNoAT = runInference(
exp, noisySequences, enableFeedback=True, apicalTiebreak=False)
    runError, activityNoAM, responsesNoAM = runInference(
exp, noisySequences, enableFeedback=True, apicalModulationBasalThreshold=False)
runError, activityNoIN, responsesNoIN = runInference(
exp, noisySequences, enableFeedback=True, inertia=False)
# Now that actual processing is done, we compute various statistics and plot graphs.
seqlen = len(noisySequences[0])
sdrlen = 2048 * 8 # Should be the total number of cells in L4. Need to make this more parametrized!
for numseq in range(len(responsesNoNoise)):
diffsFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoAT.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoATL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoATL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoATL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoATL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoATL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoAM.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoAML2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoAML2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoAML4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoAML4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoAML4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoIN.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoINL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoINL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoINL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoINL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoINL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
cpcfb = []; cpcnofb=[]; cpcnoat=[]; cpcnoam=[]; cpcnoin=[];
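      # For each timestep, encode the no-noise L4 activity and each condition's
      # L4 prediction as binary vectors of length sdrlen + 1 (the final element
      # is always set, so neither vector is ever all-zero) and record their
      # Pearson correlation.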
for x in range(seqlen):
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcfb.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnofb.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAT[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoat.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAM[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoam.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoIN[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoin.append(numpy.corrcoef(z1, z2)[0,1])
# Note that the correlations are appended across all seeds and sequences
corrsPredCorrectNoFBL4.append(cpcnofb[1:])
corrsPredCorrectNoATL4.append(cpcnoat[1:])
corrsPredCorrectNoINL4.append(cpcnoin[1:])
corrsPredCorrectNoAML4.append(cpcnoam[1:])
corrsPredCorrectFBL4.append(cpcfb[1:])
# diffsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
print "Size of L2 responses (FB):", [len(responsesFB[numseq]['L2Responses'][x]) for x in range(seqlen)]
print "Size of L2 responses (NoNoise):", [len(responsesNoNoise[numseq]['L2Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (FB):", [len(responsesFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoFB):", [len(responsesNoFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoAT):", [len(responsesNoAT[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoAM):", [len(responsesNoAM[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoIN):", [len(responsesNoIN[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoNoise):", [len(responsesNoNoise[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 predictions (FB):", [len(responsesFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoFB):", [len(responsesNoFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoAT):", [len(responsesNoAT[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoAM):", [len(responsesNoAM[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoIN):", [len(responsesNoIN[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoNoise):", [len(responsesNoNoise[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "L2 overlap with current (FB): ", overlapsFBL2[-1]
print "L4 overlap with current (FB): ", overlapsFBL4[-1]
print "L4 overlap with current (NoFB): ", overlapsNoFBL4[-1]
print "L4 correlation pred/correct (FB): ", corrsPredCorrectFBL4[-1]
print "L4 correlation pred/correct (NoFB): ", corrsPredCorrectNoFBL4[-1]
print "L4 correlation pred/correct (NoAT): ", corrsPredCorrectNoATL4[-1]
print "L4 correlation pred/correct (NoAM): ", corrsPredCorrectNoATL4[-1]
print "L4 correlation pred/correct (NoIN): ", corrsPredCorrectNoATL4[-1]
print "NoNoise sequence:", [list(x)[:2] for x in sequences[numseq]]
print "Noise sequence:", [list(x)[:2] for x in noisySequences[numseq]]
print "NoNoise L4 responses:", [list(x)[:2] for x in responsesNoNoise[numseq]['L4Responses']]
print "NoFB L4 responses:", [list(x)[:2] for x in responsesNoFB[numseq]['L4Responses']]
print ""
plt.figure()
allDataSets = (corrsPredCorrectFBL4, corrsPredCorrectNoFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
allmeans = [numpy.mean(x) for x in allDataSets]
allstds = [numpy.std(x) for x in allDataSets]
nbbars = len(allmeans)
plt.bar(2*(1+numpy.arange(nbbars))-.5, allmeans, 1.0, color='r', edgecolor='none', yerr=allstds, capsize=5, ecolor='k')
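  # Draw pairwise significance brackets comparing the full (FB) condition with
  # each ablation, using a Wilcoxon rank-sum test on the pooled pred/correct
  # correlations; the labels follow the usual p < .05 / .01 / .001 star
  # convention, with 'o' marking a non-significant difference.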
for nn in range(1, nbbars):
plt.vlines([2, 2 +2*nn], 1.2, 1.2+(nn/10.0), lw=2); plt.hlines(1.2+(nn/10.0), 2, 2+2*nn, lw=2)
pval = scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(allDataSets[nn]).ravel())[1]
if pval > 0.05:
pvallabel = ' o' #r'$o$'
elif pval > 0.01:
pvallabel = '*'
elif pval > 0.001:
pvallabel = '**'
else:
pvallabel = '***'
plt.text(3, 1.2+(nn/10.0)+.02, pvallabel, fontdict={"size":14})
  plt.xticks(2*(1+numpy.arange(nbbars)), ('Full', 'No\nFB', 'No Earlier\nFiring', 'No Threshold\nModulation', 'No Slower\nDynamics'))
plt.ylabel("Avg. Prediction Performance");
plt.title(plotTitle)
plt.savefig(plotTitle+".png")
# scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(corrsPredCorrectNoATL4).ravel())
plt.show()
return (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4, corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
if __name__ == "__main__":
plt.ion()
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, no shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, no shared range")
# (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
# corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
# numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, shared range")
#
# (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
# corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
# numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, no shared range")
| gpl-3.0 |
hlin117/statsmodels | statsmodels/tsa/filters/tests/test_filters.py | 27 | 41409 | from datetime import datetime
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_raises, assert_)
from numpy import array, column_stack
from statsmodels.datasets import macrodata
from statsmodels.tsa.base.datetools import dates_from_range
from pandas import Index, DataFrame, DatetimeIndex, concat
from statsmodels.tsa.filters.api import (bkfilter, hpfilter, cffilter,
convolution_filter, recursive_filter)
def test_bking1d():
"""
    Test Baxter-King band-pass filter. Results are taken from Stata.
"""
bking_results = array([7.320813, 2.886914, -6.818976, -13.49436,
-13.27936, -9.405913, -5.691091, -5.133076, -7.273468,
-9.243364, -8.482916, -4.447764, 2.406559, 10.68433,
19.46414, 28.09749, 34.11066, 33.48468, 24.64598, 9.952399,
-4.265528, -12.59471, -13.46714, -9.049501, -3.011248,
.5655082, 2.897976, 7.406077, 14.67959, 18.651, 13.05891,
-2.945415, -24.08659, -41.86147, -48.68383, -43.32689,
-31.66654, -20.38356, -13.76411, -9.978693, -3.7704, 10.27108,
31.02847, 51.87613, 66.93117, 73.51951, 73.4053, 69.17468,
59.8543, 38.23899, -.2604809, -49.0107, -91.1128, -112.1574,
-108.3227, -86.51453, -59.91258, -40.01185, -29.70265,
-22.76396, -13.08037, 1.913622, 20.44045, 37.32873, 46.79802,
51.95937, 59.67393, 70.50803, 81.27311, 83.53191, 67.72536,
33.78039, -6.509092, -37.31579, -46.05207, -29.81496, 1.416417,
28.31503,
32.90134, 8.949259, -35.41895, -84.65775, -124.4288, -144.6036,
-140.2204, -109.2624, -53.6901, 15.07415, 74.44268, 104.0403,
101.0725, 76.58291, 49.27925, 36.15751, 36.48799, 37.60897,
27.75998, 4.216643, -23.20579, -39.33292, -36.6134, -20.90161,
-4.143123, 5.48432, 9.270075, 13.69573, 22.16675, 33.01987,
41.93186, 47.12222, 48.62164, 47.30701, 40.20537, 22.37898,
-7.133002, -43.3339, -78.51229, -101.3684, -105.2179,
-90.97147,
-68.30824, -48.10113, -35.60709, -31.15775, -31.82346,
-32.49278, -28.22499, -14.42852, 10.1827, 36.64189, 49.43468,
38.75517, 6.447761, -33.15883, -62.60446, -72.87829, -66.54629,
-52.61205, -38.06676, -26.19963, -16.51492, -7.007577,
.6125674,
7.866972, 14.8123, 22.52388, 30.65265, 39.47801, 49.05027,
59.02925,
72.88999, 95.08865, 125.8983, 154.4283, 160.7638, 130.6092,
67.84406, -7.070272, -68.08128, -99.39944, -104.911,
-100.2372, -98.11596, -104.2051, -114.0125, -113.3475,
-92.98669, -51.91707, -.7313812, 43.22938, 64.62762, 64.07226,
59.35707, 67.06026, 91.87247, 124.4591, 151.2402, 163.0648,
154.6432])
X = macrodata.load().data['realinv']
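    # The positional arguments here are (low, high, K): keep cycles with periods
    # between 6 and 32 quarters, using a 12-quarter lead/lag truncation
    # (description assumed from the statsmodels bkfilter signature).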
Y = bkfilter(X, 6, 32, 12)
assert_almost_equal(Y,bking_results,4)
def test_bking2d():
"""
Test Baxter-King band-pass filter with 2d input
"""
bking_results = array([[7.320813,-.0374475], [2.886914,-.0430094],
[-6.818976,-.053456], [-13.49436,-.0620739], [-13.27936,-.0626929],
[-9.405913,-.0603022], [-5.691091,-.0630016], [-5.133076,-.0832268],
[-7.273468,-.1186448], [-9.243364,-.1619868], [-8.482916,-.2116604],
[-4.447764,-.2670747], [2.406559,-.3209931], [10.68433,-.3583075],
[19.46414,-.3626742], [28.09749,-.3294618], [34.11066,-.2773388],
[33.48468,-.2436127], [24.64598,-.2605531], [9.952399,-.3305166],
[-4.265528,-.4275561], [-12.59471,-.5076068], [-13.46714,-.537573],
[-9.049501,-.5205845], [-3.011248,-.481673], [.5655082,-.4403994],
[2.897976,-.4039957], [7.406077,-.3537394], [14.67959,-.2687359],
[18.651,-.1459743], [13.05891,.0014926], [-2.945415,.1424277],
[-24.08659,.2451936], [-41.86147,.288541], [-48.68383,.2727282],
[-43.32689,.1959127], [-31.66654,.0644874], [-20.38356,-.1158372],
[-13.76411,-.3518627], [-9.978693,-.6557535], [-3.7704,-1.003754],
[10.27108,-1.341632], [31.02847,-1.614486], [51.87613,-1.779089],
[66.93117,-1.807459], [73.51951,-1.679688], [73.4053,-1.401012],
[69.17468,-.9954996], [59.8543,-.511261], [38.23899,-.0146745],
[-.2604809,.4261311], [-49.0107,.7452514], [-91.1128,.8879492],
[-112.1574,.8282748], [-108.3227,.5851508], [-86.51453,.2351699],
[-59.91258,-.1208998], [-40.01185,-.4297895], [-29.70265,-.6821963],
[-22.76396,-.9234254], [-13.08037,-1.217539], [1.913622,-1.57367],
[20.44045,-1.927008], [37.32873,-2.229565], [46.79802,-2.463154],
[51.95937,-2.614697], [59.67393,-2.681357], [70.50803,-2.609654],
[81.27311,-2.301618], [83.53191,-1.720974], [67.72536,-.9837123],
[33.78039,-.2261613], [-6.509092,.4546985], [-37.31579,1.005751],
[-46.05207,1.457224], [-29.81496,1.870815], [1.416417,2.263313],
[28.31503,2.599906], [32.90134,2.812282], [8.949259,2.83358],
[-35.41895,2.632667], [-84.65775,2.201077], [-124.4288,1.598951],
[-144.6036,.9504762], [-140.2204,.4187932], [-109.2624,.1646726],
[-53.6901,.2034265], [15.07415,.398165], [74.44268,.5427476],
[104.0403,.5454975], [101.0725,.4723354], [76.58291,.4626823],
[49.27925,.5840143], [36.15751,.7187981], [36.48799,.6058422],
[37.60897,.1221227], [27.75998,-.5891272], [4.216643,-1.249841],
[-23.20579,-1.594972], [-39.33292,-1.545968], [-36.6134,-1.275494],
[-20.90161,-1.035783], [-4.143123,-.9971732], [5.48432,-1.154264],
[9.270075,-1.29987], [13.69573,-1.240559], [22.16675,-.9662656],
[33.01987,-.6420301], [41.93186,-.4698712], [47.12222,-.4527797],
[48.62164,-.4407153], [47.30701,-.2416076], [40.20537,.2317583],
[22.37898,.8710276], [-7.133002,1.426177], [-43.3339,1.652785],
[-78.51229,1.488021], [-101.3684,1.072096], [-105.2179,.6496446],
[-90.97147,.4193682], [-68.30824,.41847], [-48.10113,.5253419],
[-35.60709,.595076], [-31.15775,.5509905], [-31.82346,.3755519],
[-32.49278,.1297979], [-28.22499,-.0916165], [-14.42852,-.2531037],
[10.1827,-.3220784], [36.64189,-.2660561], [49.43468,-.1358522],
[38.75517,-.0279508], [6.447761,.0168735], [-33.15883,.0315687],
[-62.60446,.0819507], [-72.87829,.2274033], [-66.54629,.4641401],
[-52.61205,.7211093], [-38.06676,.907773], [-26.19963,.9387103],
[-16.51492,.7940786], [-7.007577,.5026631], [.6125674,.1224996],
[7.866972,-.2714422], [14.8123,-.6273921], [22.52388,-.9124271],
[30.65265,-1.108861], [39.47801,-1.199206], [49.05027,-1.19908],
[59.02925,-1.139046], [72.88999,-.9775021], [95.08865,-.6592603],
[125.8983,-.1609712], [154.4283,.4796201], [160.7638,1.100565],
[130.6092,1.447148], [67.84406,1.359608], [-7.070272,.8931825],
[-68.08128,.2619787], [-99.39944,-.252208], [-104.911,-.4703874],
[-100.2372,-.4430657], [-98.11596,-.390683], [-104.2051,-.5647846],
[-114.0125,-.9397582], [-113.3475,-1.341633], [-92.98669,-1.567337],
[-51.91707,-1.504943], [-.7313812,-1.30576], [43.22938,-1.17151],
[64.62762,-1.136151], [64.07226,-1.050555], [59.35707,-.7308369],
[67.06026,-.1766731], [91.87247,.3898467], [124.4591,.8135461],
[151.2402,.9644226], [163.0648,.6865934], [154.6432,.0115685]])
X = macrodata.load().data[['realinv','cpi']].view((float,2))
Y = bkfilter(X, 6, 32, 12)
assert_almost_equal(Y,bking_results,4)
def test_hpfilter():
"""
Test Hodrick-Prescott Filter. Results taken from Stata.
"""
hpfilt_res = array([[3.951191484487844718e+01,2.670837085155121713e+03],
[8.008853245681075350e+01,2.698712467543189177e+03],
[4.887545512195401898e+01,2.726612544878045810e+03],
[3.059193256079834100e+01,2.754612067439201837e+03],
[6.488266733421960453e+01,2.782816332665780465e+03],
[2.304024204546703913e+01,2.811349757954532834e+03],
[-1.355312369487364776e+00,2.840377312369487299e+03],
[-6.746236512580753697e+01,2.870078365125807522e+03],
[-8.136743836853429457e+01,2.900631438368534418e+03],
[-6.016789026443257171e+01,2.932172890264432681e+03],
[-4.636922433138215638e+01,2.964788224331382025e+03],
[-2.069533915570400495e+01,2.998525339155703932e+03],
[-2.162152558595607843e+00,3.033403152558595593e+03],
[-4.718647774311648391e+00,3.069427647774311481e+03],
[-1.355645669169007306e+01,3.106603456691690099e+03],
[-4.436926204475639679e+01,3.144932262044756499e+03],
[-4.332027378211660107e+01,3.184407273782116590e+03],
[-4.454697106352068658e+01,3.224993971063520803e+03],
[-2.629875787765286077e+01,3.266630757877652741e+03],
[-4.426119635629265758e+01,3.309228196356292756e+03],
[-1.443441190762496262e+01,3.352680411907625057e+03],
[-2.026686669186437939e+01,3.396853866691864368e+03],
[-1.913700136208899494e+01,3.441606001362089046e+03],
[-5.482458977940950717e+01,3.486781589779409387e+03],
[-1.596244517937793717e+01,3.532213445179378141e+03],
[-1.374011542874541192e+01,3.577700115428745448e+03],
[1.325482813403914406e+01,3.623030171865960710e+03],
[5.603040174253828809e+01,3.667983598257461836e+03],
[1.030743373627105939e+02,3.712348662637289181e+03],
[7.217534795943993231e+01,3.755948652040559864e+03],
[5.462972503693208637e+01,3.798671274963067845e+03],
[4.407065050666142270e+01,3.840449349493338559e+03],
[3.749016270204992907e+01,3.881249837297949853e+03],
[-1.511244199923112319e+00,3.921067244199923152e+03],
[-9.093507374079763395e+00,3.959919507374079785e+03],
[-1.685361946760258434e+01,3.997823619467602384e+03],
[2.822211031434289907e+01,4.034790889685657021e+03],
[6.117590627896424849e+01,4.070822093721035344e+03],
[5.433135391434370831e+01,4.105935646085656117e+03],
[3.810480376716623141e+01,4.140188196232833434e+03],
[7.042964928802848590e+01,4.173670350711971878e+03],
[4.996346842507591646e+01,4.206496531574924120e+03],
[4.455282059571254649e+01,4.238825179404287155e+03],
[-7.584961950576143863e+00,4.270845961950576566e+03],
[-4.620339247697120300e+01,4.302776392476971523e+03],
[-7.054024364552969928e+01,4.334829243645529459e+03],
[-6.492941099801464588e+01,4.367188410998014660e+03],
[-1.433567024239555394e+02,4.399993702423955256e+03],
[-5.932834493089012540e+01,4.433344344930889747e+03],
[-6.842096758743628016e+01,4.467249967587436004e+03],
[-6.774011924654860195e+01,4.501683119246548813e+03],
[-9.030958565658056614e+01,4.536573585656580690e+03],
[-4.603981499136807543e+01,4.571808814991368308e+03],
[2.588118806672991923e+01,4.607219811933269739e+03],
[3.489419371912299539e+01,4.642608806280876706e+03],
[7.675179642495095322e+01,4.677794203575049323e+03],
[1.635497817724171910e+02,4.712616218227582976e+03],
[1.856079654765617306e+02,4.746963034523438182e+03],
[1.254269446392718237e+02,4.780825055360728584e+03],
[1.387413113837174024e+02,4.814308688616282780e+03],
[6.201826599282230745e+01,4.847598734007177882e+03],
[4.122129542972197669e+01,4.880966704570278125e+03],
[-4.120287475842360436e+01,4.914722874758424041e+03],
[-9.486328233441963675e+01,4.949203282334419782e+03],
[-1.894232132641573116e+02,4.984718213264157384e+03],
[-1.895766639620087517e+02,5.021518663962008759e+03],
[-1.464092413342650616e+02,5.059737241334265491e+03],
[-1.218770668721217589e+02,5.099388066872122181e+03],
[-4.973075629078175552e+01,5.140393756290781312e+03],
[-5.365375213897277717e+01,5.182600752138972894e+03],
[-7.175241524251214287e+01,5.225824415242512259e+03],
[-7.834757283225462743e+01,5.269846572832254424e+03],
[-6.264220687943907251e+01,5.314404206879438789e+03],
[-3.054332122210325906e+00,5.359185332122210639e+03],
[4.808218808024685131e+01,5.403838811919753425e+03],
[2.781399326736391231e+00,5.448011600673263274e+03],
[-2.197570415173231595e+01,5.491380704151732061e+03],
[1.509441335012807031e+02,5.533624866498719712e+03],
[1.658909029574851957e+02,5.574409097042514986e+03],
[2.027292548049981633e+02,5.613492745195001589e+03],
[1.752101578176061594e+02,5.650738842182393455e+03],
[1.452808749847536092e+02,5.686137125015246056e+03],
[1.535481629475025329e+02,5.719786837052497503e+03],
[1.376169777998875361e+02,5.751878022200112355e+03],
[1.257703080340770612e+02,5.782696691965922582e+03],
[-2.524186846895645431e+01,5.812614868468956047e+03],
[-6.546618027042404719e+01,5.842083180270424236e+03],
[1.192352023580315290e+01,5.871536479764196883e+03],
[1.043482970188742911e+02,5.901368702981125352e+03],
[2.581376184768396342e+01,5.931981238152316109e+03],
[6.634330880534071184e+01,5.963840691194659485e+03],
[-4.236780162594641297e+01,5.997429801625946311e+03],
[-1.759397735321817891e+02,6.033272773532181418e+03],
[-1.827933311233055065e+02,6.071867331123305121e+03],
[-2.472312362505917918e+02,6.113601236250591683e+03],
[-2.877470049336488955e+02,6.158748004933649099e+03],
[-2.634066336693540507e+02,6.207426633669354487e+03],
[-1.819572770763625158e+02,6.259576277076362203e+03],
[-1.175034606274621183e+02,6.314971460627461965e+03],
[-4.769898649718379602e+01,6.373272986497183410e+03],
[1.419578280287896632e+01,6.434068217197121157e+03],
[6.267929662760798237e+01,6.496914703372392069e+03],
[6.196413196753746888e+01,6.561378868032462378e+03],
[5.019769125317907310e+01,6.627066308746821051e+03],
[4.665364933213822951e+01,6.693621350667861407e+03],
[3.662430749527266016e+01,6.760719692504727391e+03],
[7.545680850246480986e+01,6.828066191497535328e+03],
[6.052940492147536133e+01,6.895388595078524304e+03],
[6.029518881462354329e+01,6.962461811185376064e+03],
[2.187042136652689805e+01,7.029098578633473153e+03],
[2.380067926824722235e+01,7.095149320731752596e+03],
[-7.119129802169481991e+00,7.160478129802169860e+03],
[-3.194497359120850888e+01,7.224963973591208742e+03],
[-1.897137038934124575e+01,7.288481370389341464e+03],
[-1.832687287845146784e+01,7.350884872878451461e+03],
[4.600482336597542599e+01,7.412017176634024509e+03],
[2.489047706403016491e+01,7.471709522935970199e+03],
[6.305909392127250612e+01,7.529821906078727807e+03],
[4.585212309498183458e+01,7.586229876905018500e+03],
[9.314260180878318351e+01,7.640848398191216802e+03],
[1.129819097095369216e+02,7.693621090290463144e+03],
[1.204662123176703972e+02,7.744549787682329224e+03],
[1.336860614601246198e+02,7.793706938539875409e+03],
[1.034567175813735957e+02,7.841240282418626521e+03],
[1.403118873372050075e+02,7.887381112662795204e+03],
[1.271726169351004501e+02,7.932425383064899506e+03],
[8.271925765282139764e+01,7.976756742347178260e+03],
[-3.197432211752584408e+01,8.020838322117525422e+03],
[-1.150209535194062482e+02,8.065184953519406008e+03],
[-1.064694837456772802e+02,8.110291483745677397e+03],
[-1.190428718925368230e+02,8.156580871892536379e+03],
[-1.353635336292991269e+02,8.204409533629299403e+03],
[-9.644348283027102298e+01,8.254059482830271008e+03],
[-6.143413116116607853e+01,8.305728131161165948e+03],
[-3.019161311097923317e+01,8.359552613110980019e+03],
[1.384333163552582846e+00,8.415631666836447039e+03],
[-4.156016073666614830e+01,8.474045160736666730e+03],
[-4.843882841860977351e+01,8.534873828418609264e+03],
[-6.706442838867042155e+01,8.598172428388670596e+03],
[-2.019644488579979225e+01,8.663965444885800025e+03],
[-4.316446881084630149e+00,8.732235446881084499e+03],
[4.435061943264736328e+01,8.802952380567352520e+03],
[2.820550564155564643e+01,8.876083494358445023e+03],
[5.155624419490777655e+01,8.951623755805092514e+03],
[-4.318760899315748247e+00,9.029585760899315574e+03],
[-6.534632828542271454e+01,9.110014328285422380e+03],
[-7.226757738268497633e+01,9.192951577382684263e+03],
[-9.412378615444868046e+01,9.278398786154448317e+03],
[-1.191240653288368776e+02,9.366312065328836979e+03],
[-4.953669826751865912e+01,9.456588698267518339e+03],
[-6.017251579067487910e+01,9.549051515790675694e+03],
[-5.103438828313483100e+01,9.643492388283135369e+03],
[-7.343057830678117170e+01,9.739665578306781754e+03],
[-2.774245193054957781e+01,9.837293451930549054e+03],
[-3.380481112519191811e+00,9.936052481112519672e+03],
[-2.672779877794346248e+01,1.003560179877794326e+04],
[-3.217342505148371856e+01,1.013559842505148299e+04],
[-4.140567518359966925e+01,1.023568267518359971e+04],
[-6.687756033938057953e+00,1.033547475603393832e+04],
[7.300600408459467872e+01,1.043456899591540605e+04],
[6.862345670680042531e+01,1.053255554329319966e+04],
[5.497882461487461114e+01,1.062907017538512628e+04],
[9.612244093055960548e+01,1.072379155906944106e+04],
[1.978212770103891671e+02,1.081643272298961165e+04],
[1.362772276848754700e+02,1.090676677231512440e+04],
[2.637635494867263333e+02,1.099469045051327339e+04],
[1.876813256815166824e+02,1.108018567431848351e+04],
[1.711447873158413131e+02,1.116339921268415856e+04],
[5.257586460826678376e+01,1.124459513539173349e+04],
[4.710652228531762375e+01,1.132414447771468258e+04],
[-6.237613484241046535e+01,1.140245113484241119e+04],
[-9.982044354035315337e+01,1.147994844354035376e+04],
[-7.916275548997509759e+01,1.155703075548997549e+04],
[-9.526003459472303803e+01,1.163403003459472347e+04],
[-1.147987680369169539e+02,1.171122876803691724e+04],
[-1.900259054765901965e+02,1.178884990547659072e+04],
[-2.212256473439556430e+02,1.186704464734395515e+04],
[-2.071394278781845060e+02,1.194584542787818464e+04],
[-8.968541528904825100e+01,1.202514641528904758e+04],
[-6.189531564415665343e+01,1.210471231564415575e+04],
[-5.662878162551714922e+01,1.218425178162551674e+04],
[-4.961678134413705266e+01,1.226343478134413635e+04],
[-3.836288992144181975e+01,1.234189588992144127e+04],
[-8.956671991456460091e+00,1.241923867199145570e+04],
[3.907028461866866564e+01,1.249504271538133071e+04],
[1.865299000184495526e+01,1.256888200999815490e+04],
[4.279803532226833340e+01,1.264035496467773191e+04],
[3.962735362631610769e+01,1.270907164637368442e+04],
[1.412691291877854383e+02,1.277466887081221466e+04],
[1.256537791844366438e+02,1.283680822081556289e+04],
[7.067642758858892194e+01,1.289523957241141034e+04],
[1.108876647603192396e+02,1.294979133523968085e+04],
[9.956490829291760747e+01,1.300033609170708223e+04],
[1.571612709880937473e+02,1.304681572901190702e+04],
[2.318746375812715996e+02,1.308923436241872878e+04],
[2.635546670125277160e+02,1.312769433298747208e+04],
[2.044220965739259555e+02,1.316244290342607383e+04],
[2.213739418903714977e+02,1.319389205810962812e+04],
[1.020184547767112235e+02,1.322258154522328914e+04],
[-1.072694716663390864e+02,1.324918947166633916e+04],
[-3.490477058718843182e+02,1.327445770587188417e+04],
[-3.975570728533530200e+02,1.329906107285335383e+04],
[-3.331152428080622485e+02,1.332345624280806260e+04]])
dta = macrodata.load().data['realgdp']
res = column_stack((hpfilter(dta,1600)))
assert_almost_equal(res,hpfilt_res,6)
def test_cfitz_filter():
"""
Test Christiano-Fitzgerald Filter. Results taken from R.
"""
#NOTE: The Stata mata code and the matlab code it's based on are wrong.
cfilt_res = array([[0.712599537179426,0.439563468233128],
[1.06824041304411,0.352886666575907],
[1.19422467791128,0.257297004260607],
[0.970845473140327,0.114504692143872],
[0.467026976628563,-0.070734782329146],
[-0.089153511514031,-0.238609685132605],
[-0.452339254128573,-0.32376584042956],
[-0.513231214461187,-0.314288554228112],
[-0.352372578720063,-0.258815055101336],
[-0.160282602521333,-0.215076844089567],
[-0.0918782593827686,-0.194120745417214],
[-0.168083823205437,-0.158327420072693],
[-0.291595204965808,-0.0742727139742986],
[-0.348638756841307,0.037008291163602],
[-0.304328040874631,0.108196527328748],
[-0.215933150969686,0.0869231107437175],
[-0.165632621390694,-0.0130556619786275],
[-0.182326839507151,-0.126570926191824],
[-0.223737786804725,-0.205535321806185],
[-0.228939291453403,-0.269110078201836],
[-0.185518327227038,-0.375976507132174],
[-0.143900152461529,-0.53760115656157],
[-0.162749541550174,-0.660065018626038],
[-0.236263634756884,-0.588542352053736],
[-0.275785854309211,-0.236867929421996],
[-0.173666515108109,0.303436335579219],
[0.0963135720251639,0.779772338801993],
[0.427070069032285,0.929108075350647],
[0.629034743259998,0.658330841002647],
[0.557941248993624,0.118500049361018],
[0.227866624051603,-0.385048321099911],
[-0.179878859883227,-0.582223992561493],
[-0.428263000051965,-0.394053702908091],
[-0.381640684645912,0.0445437406977307],
[-0.0942745548364887,0.493997792757968],
[0.238132391504895,0.764519811304315],
[0.431293754256291,0.814755206427316],
[0.455010435813661,0.745567043101108],
[0.452800768971269,0.709401694610443],
[0.615754619329312,0.798293251119636],
[1.00256335412457,0.975856845059388],
[1.44841039351691,1.09097252730799],
[1.64651971120370,0.967823457118036],
[1.35534532901802,0.522397724737059],
[0.580492790312048,-0.16941343361609],
[-0.410746188031773,-0.90760401289056],
[-1.26148406066881,-1.49592867122591],
[-1.75784179124566,-1.87404167409849],
[-1.94478553960064,-2.14586210891112],
[-2.03751202708559,-2.465855239868],
[-2.20376059354166,-2.86294187189049],
[-2.39722338315852,-3.15004697654831],
[-2.38032366161537,-3.01390466643222],
[-1.91798022532025,-2.23395210271226],
[-0.982318490353716,-0.861346053067472],
[0.199047030343412,0.790266582335616],
[1.28582776574786,2.33731327460104],
[2.03565905376430,3.54085486821911],
[2.41201557412526,4.36519456268955],
[2.52011070482927,4.84810517685452],
[2.45618479815452,4.92906708807477],
[2.22272146945388,4.42591058990048],
[1.78307567169034,3.20962906108388],
[1.18234431860844,1.42568060336985],
[0.590069172333348,-0.461896808688991],
[0.19662302949837,-1.89020992539465],
[0.048307034171166,-2.53490571941987],
[-0.0141956981899000,-2.50020338531674],
[-0.230505187108187,-2.20625973569823],
[-0.700947410386801,-2.06643697511048],
[-1.27085123163060,-2.21536883679783],
[-1.64082547897928,-2.49016921117735],
[-1.62286182971254,-2.63948740221362],
[-1.31609762181362,-2.54685250637904],
[-1.03085567704873,-2.27157435428923],
[-1.01100120380112,-1.90404507430561],
[-1.19823958399826,-1.4123209792214],
[-1.26398933608383,-0.654000086153317],
[-0.904710628949692,0.447960016248203],
[-0.151340093679588,1.73970411237156],
[0.592926881165989,2.85741581650685],
[0.851660587507523,3.4410446351716],
[0.480324393352127,3.36870271362297],
[-0.165153230782417,2.82003806696544],
[-0.459235919375844,2.12858991660866],
[0.0271158842479935,1.55840980891556],
[1.18759188180671,1.17980298478623],
[2.43238266962309,0.904011534980672],
[3.08277213720132,0.595286911949837],
[2.79953663720953,0.148014782859571],
[1.73694442845833,-0.496297332023011],
[0.357638079951977,-1.33108149877570],
[-0.891418825216945,-2.22650083183366],
[-1.77646467793627,-2.89359299718574],
[-2.24614790863088,-2.97921619243347],
[-2.29048879096607,-2.30003092779280],
[-1.87929656465888,-1.05298381273274],
[-1.04510101454788,0.215837488618531],
[0.00413338508394524,0.937866257924888],
[0.906870625251025,0.92664365343019],
[1.33869057593416,0.518564571494679],
[1.22659678454440,0.288096869652890],
[0.79380139656044,0.541053084632774],
[0.38029431865832,1.01905199983437],
[0.183929413600038,1.10529586616777],
[0.140045425897033,0.393618564826736],
[0.0337313182352219,-0.86431819007665],
[-0.269208622829813,-1.85638085246792],
[-0.687276639992166,-1.82275359004533],
[-1.00161592325614,-0.692695765071617],
[-1.06320089194036,0.803577361347341],
[-0.927152307196776,1.67366338751788],
[-0.786802101366614,1.42564362251793],
[-0.772970884572502,0.426446388877964],
[-0.81275662801789,-0.437721213831647],
[-0.686831250382476,-0.504255468075149],
[-0.237936463020255,0.148656301898438],
[0.459631879129522,0.832925905720478],
[1.12717379822508,0.889455302576383],
[1.48640453200855,0.268042676202216],
[1.46515245776211,-0.446505038539178],
[1.22993484959115,-0.563868578181134],
[1.0272100765927,0.0996849952196907],
[0.979191212438404,1.05053652824665],
[1.00733490030391,1.51658415000556],
[0.932192535457706,1.06262774912638],
[0.643374300839414,-0.0865180803476065],
[0.186885168954461,-1.24799408923277],
[-0.290842337365465,-1.80035611156538],
[-0.669446735516495,-1.58847333561510],
[-0.928915624595538,-0.932116966867929],
[-1.11758635926997,-0.307879396807850],
[-1.26832454569756,-0.00856199983957032],
[-1.35755577149251,-0.0303537516690989],
[-1.34244112665546,-0.196807620887435],
[-1.22227976023299,-0.342062643495923],
[-1.04601473486818,-0.390474392372016],
[-0.85158508717846,-0.322164402093596],
[-0.605033439160543,-0.126930141915954],
[-0.218304303942818,0.179551077808122],
[0.352173017779006,0.512327303000081],
[1.01389600097229,0.733397490572755],
[1.55149778750607,0.748740387440165],
[1.75499674757591,0.601759717901009],
[1.56636057468633,0.457705308377562],
[1.12239792537274,0.470849913286519],
[0.655802600286141,0.646142040378738],
[0.335285115340180,0.824103600255079],
[0.173454596506888,0.808068498175582],
[0.0666753011315252,0.521488214487996],
[-0.0842367474816212,0.0583493276173476],
[-0.285604762631464,-0.405958418332253],
[-0.465735422869919,-0.747800086512926],
[-0.563586691231348,-0.94982272350799],
[-0.598110322024572,-1.04736894794361],
[-0.65216025756061,-1.04858365218822],
[-0.789663117801624,-0.924145633093637],
[-0.984704045337959,-0.670740724179446],
[-1.12449565589348,-0.359476803003931],
[-1.07878318723543,-0.092290938944355],
[-0.775555435407062,0.102132527529259],
[-0.231610677329856,0.314409560305622],
[0.463192794235131,0.663523546243286],
[1.17416973448423,1.13156902460931],
[1.74112278814906,1.48967153067024],
[2.00320855757084,1.42571085941843],
[1.8529912317336,0.802460519079555],
[1.30747261947211,-0.169219078629572],
[0.540237070403222,-1.01621539672694],
[-0.177136817092375,-1.3130784867977],
[-0.611981468823591,-0.982477824460773],
[-0.700240028737747,-0.344919609255406],
[-0.572396497740112,0.125083535035390],
[-0.450934466600975,0.142553112732280],
[-0.494020014254326,-0.211429053871656],
[-0.701707589094918,-0.599602868825992],
[-0.94721339346157,-0.710669870591623],
[-1.09297139748946,-0.47846194092245],
[-1.08850658866583,-0.082258450179988],
[-0.976082880696692,0.235758921309309],
[-0.81885695346771,0.365298185204303],
[-0.63165529525553,0.384725179378064],
[-0.37983149226421,0.460240196164378],
[-0.0375551354277652,0.68580913832794],
[0.361996927427804,0.984470835955107],
[0.739920615366072,1.13195975020298],
[1.03583478061534,0.88812510421667],
[1.25614938962160,0.172561520611839],
[1.45295030231799,-0.804979390544485],
[1.64887158748426,-1.55662011197859],
[1.78022721495313,-1.52921975346218],
[1.71945683859668,-0.462240366424548],
[1.36728880239190,1.31213774341268],
[0.740173894315912,2.88362740582926],
[-0.0205364331835904,3.20319080963167],
[-0.725643970956428,1.75222466531151],
[-1.23900506689782,-0.998432917440275],
[-1.52651897508678,-3.72752870885448],
[-1.62857516631435,-5.00551707196292],
[-1.59657420180451,-4.18499132634584],
[-1.45489013276495,-1.81759097305637],
[-1.21309542313047,0.722029457352468]])
dta = macrodata.load().data[['tbilrate','infl']].view((float,2))[1:]
cyc, trend = cffilter(dta)
assert_almost_equal(cyc, cfilt_res, 8)
#do 1d
cyc, trend = cffilter(dta[:,1])
assert_almost_equal(cyc, cfilt_res[:,1], 8)
def test_bking_pandas():
# 1d
dta = macrodata.load_pandas().data
index = Index(dates_from_range('1959Q1', '2009Q3'))
dta.index = index
filtered = bkfilter(dta["infl"])
nd_filtered = bkfilter(dta['infl'].values)
assert_equal(filtered.values, nd_filtered)
assert_equal(filtered.index[0], datetime(1962, 3, 31))
assert_equal(filtered.index[-1], datetime(2006, 9, 30))
assert_equal(filtered.name, "infl")
#2d
filtered = bkfilter(dta[["infl","unemp"]])
nd_filtered = bkfilter(dta[['infl', 'unemp']].values)
assert_equal(filtered.values, nd_filtered)
assert_equal(filtered.index[0], datetime(1962, 3, 31))
assert_equal(filtered.index[-1], datetime(2006, 9, 30))
assert_equal(filtered.columns.values, ["infl", "unemp"])
def test_cfitz_pandas():
# 1d
dta = macrodata.load_pandas().data
index = Index(dates_from_range('1959Q1', '2009Q3'))
dta.index = index
cycle, trend = cffilter(dta["infl"])
ndcycle, ndtrend = cffilter(dta['infl'].values)
assert_allclose(cycle.values, ndcycle, rtol=1e-14)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.name, "infl")
#2d
cycle, trend = cffilter(dta[["infl","unemp"]])
ndcycle, ndtrend = cffilter(dta[['infl', 'unemp']].values)
assert_allclose(cycle.values, ndcycle, rtol=1e-14)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.columns.values, ["infl", "unemp"])
def test_hpfilter_pandas():
dta = macrodata.load_pandas().data
index = Index(dates_from_range('1959Q1', '2009Q3'))
dta.index = index
cycle, trend = hpfilter(dta["realgdp"])
ndcycle, ndtrend = hpfilter(dta['realgdp'].values)
assert_equal(cycle.values, ndcycle)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.name, "realgdp")
class TestFilters(object):
@classmethod
def setupClass(cls):
# even
data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
cls.data = DataFrame(data, DatetimeIndex(start='1/1/1951',
periods=len(data),
freq='Q'))
data[9] = np.nan
cls.datana = DataFrame(data, DatetimeIndex(start='1/1/1951',
periods=len(data),
freq='Q'))
from .results import filter_results
cls.expected = filter_results
def test_convolution(self):
x = self.data.values.squeeze()
res = convolution_filter(x, [.75, .25])
expected = self.expected.conv2
np.testing.assert_almost_equal(res, expected)
res = convolution_filter(x, [.75, .25], nsides=1)
expected = self.expected.conv1
np.testing.assert_almost_equal(res, expected)
x = self.datana.values.squeeze()
res = convolution_filter(x, [.75, .25])
expected = self.expected.conv2_na
np.testing.assert_almost_equal(res, expected)
res = convolution_filter(x, [.75, .25], nsides=1)
expected = self.expected.conv1_na
np.testing.assert_almost_equal(res, expected)
def test_convolution2d(self):
x = self.data.values
res = convolution_filter(x, [[.75], [.25]])
expected = self.expected.conv2
np.testing.assert_almost_equal(res, expected[:, None])
res = convolution_filter(np.c_[x, x], [[.75, .75], [.25, .25]])
np.testing.assert_almost_equal(res, np.c_[expected, expected])
res = convolution_filter(x, [[.75], [.25]], nsides=1)
expected = self.expected.conv1
np.testing.assert_almost_equal(res, expected[:, None])
x = self.datana.values
res = convolution_filter(x, [[.75], [.25]])
expected = self.expected.conv2_na
np.testing.assert_almost_equal(res, expected[:, None])
res = convolution_filter(x, [[.75], [.25]], nsides=1)
expected = self.expected.conv1_na
np.testing.assert_almost_equal(res, expected[:, None])
def test_recursive(self):
x = self.data.values.squeeze()
res = recursive_filter(x, [.75, .25])
expected = self.expected.recurse
np.testing.assert_almost_equal(res, expected)
res = recursive_filter(x, [.75, .25], init=[150, 100])
expected = self.expected.recurse_init
np.testing.assert_almost_equal(res, expected)
x = self.datana.values.squeeze()
res = recursive_filter(x, [.75, .25])
expected = self.expected.recurse_na
np.testing.assert_almost_equal(res, expected)
res = recursive_filter(x, [.75, .25], init=[150, 100])
expected = self.expected.recurse_init_na
np.testing.assert_almost_equal(res, expected)
np.testing.assert_raises(ValueError, recursive_filter, x,
[.75, .25, .5], [150, 100])
def test_pandas(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = self.data[0]
res = convolution_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
res = convolution_filter(x, [.75, .25], nsides=1)
assert_(res.index[0] == start)
        # nsides=1 applies no nan-padding, so the index still runs from 1951Q1 to 1958Q4
assert_(res.index[-1] == end)
res = recursive_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
x = self.datana
res = recursive_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
def test_pandas2d(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = concat((self.data[0], self.data[0]), axis=1)
res = convolution_filter(x, [[.75, .75], [.25, .25]])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
def test_odd_length_filter(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = self.data[0]
res = convolution_filter(x, [.75, .5, .3, .2, .1])
expected = self.expected.conv2_odd
np.testing.assert_almost_equal(res.values.squeeze(), expected)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
res = convolution_filter(x, [.75, .5, .3, .2, .1], nsides=1)
expected = self.expected.conv1_odd
np.testing.assert_almost_equal(res.values.squeeze(), expected)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
# with no NAs
# not a stable filter
res = recursive_filter(x, [.75, .5, .3, .2, .1], init=[150, 100,
125, 135,
145])
expected = self.expected.recurse_odd
        # the R reference values only carry 12 significant digits and this unstable
        # filter makes them blow up, so only compare to 4 decimal places
np.testing.assert_almost_equal(res.values.squeeze(), expected, 4)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
| bsd-3-clause |
Lyleo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/blocking_input.py | 69 | 12119 | """
This provides several classes used for blocking interaction with figure windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
import time
import numpy as np
from matplotlib import path, verbose
from matplotlib.cbook import is_sequence_of_strings
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
assert is_sequence_of_strings(eventslist), "Requires a sequence of event name strings"
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks=[]
def add_event(self,event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self,index=-1):
"""
This removes an event from the event list. Defaults to
removing last event, but an index can be supplied. Note that
this does not check that there are events, much like the
        normal pop method. If no events exist, this will throw an
exception.
"""
self.events.pop(index)
def pop(self,index=-1):
self.pop_event(index)
pop.__doc__=pop_event.__doc__
def __call__(self, n=1, timeout=30 ):
"""
Blocking call to retrieve n events
"""
assert isinstance(n, int), "Requires an integer argument"
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append( self.fig.canvas.mpl_connect(n, self.on_event) )
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event') )
def post_event(self):
"""
This will be called to process events
"""
assert len(self.events)>0, "No events yet"
if self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == 3:
self.button3(event)
elif button == 2:
self.button2(event)
else:
self.button1(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
key = event.key
if key == 'backspace' or key == 'delete':
self.button3(event)
elif key == 'enter':
self.button2(event)
else:
self.button1(event)
def button1( self, event ):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self)
def button2( self, event ):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self)
# This will exit even if not in infinite mode. This is
# consistent with matlab and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def button3( self, event ):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self)
# Now remove any existing clicks if possible
if len(self.events)>0:
self.pop()
def add_click(self,event):
"""
        This adds the coordinates of an event to the list of clicks
"""
self.clicks.append((event.xdata,event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks),event.xdata, event.ydata))
        # If desired, plot the click
if self.show_clicks:
self.marks.extend(
event.inaxes.plot([event.xdata,], [event.ydata,], 'r+') )
self.fig.canvas.draw()
def pop_click(self,index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
def pop(self,index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(index)
BlockingInput.pop(self,index)
def cleanup(self):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self,n=n,timeout=timeout)
return self.clicks
class BlockingContourLabeler( BlockingMouseInput ):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self,cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure )
def button1(self,event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
cs = self.cs
if event.inaxes == cs.ax:
conmin,segmin,imin,xmin,ymin = cs.find_nearest_contour(
event.x, event.y, cs.labelIndiceList)[:5]
# Get index of nearest level in subset of levels used for labeling
lmin = cs.labelIndiceList.index(conmin)
# Coordinates of contour
paths = cs.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = cs.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = cs.get_label_width(cs.labelLevelList[lmin],
cs.labelFmt, cs.labelFontSizeList[lmin])
"""
# requires python 2.5
# Figure out label rotation.
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lc if self.inline else [],
self.inline_spacing )
"""
# Figure out label rotation.
if self.inline: lcarg = lc
else: lcarg = None
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
self.inline_spacing )
cs.add_label(xmin,ymin,rotation,cs.labelLevelList[lmin],
cs.labelCValueList[lmin])
if self.inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n)>1:
paths.append( path.Path(n) )
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self,event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
# Remove this last event - not too important for clabel use
# since clabel normally doesn't have a maximum number of
# events, but best for cleanliness sake.
BlockingInput.pop(self)
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self,inline,inline_spacing=5,n=-1,timeout=-1):
self.inline=inline
self.inline_spacing=inline_spacing
BlockingMouseInput.__call__(self,n=n,timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=('button_press_event','key_press_event') )
def post_event(self):
"""
Determines if it is a key event
"""
assert len(self.events)>0, "No events yet"
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self,n=1,timeout=timeout)
return self.keyormouse
| gpl-3.0 |
Akatsuki06/AutonomousCarAI | main.py | 1 | 3848 | import numpy as np
import pandas as pd
import pyautogui as pag
import cv2
import pyscreenshot as ImageGrab
import time
import keyboard
import win32gui, win32ui, win32con, win32api
from lib.game_position import get_position,get_screen
from lib.process_image import process_img
from lib.capture_keys import log_keys,get_keys
from lib.directions import left,right,accelerate,deaccelerate
import glob
from keras.models import load_model
import os
def train(pos):
findex=len(glob.glob('data/*frames*.csv'))+1
filename_frames='data/trainig_frames-'+str(findex)+'.csv'
filename_keys='data/training_keys-'+str(findex)+'.csv'
for i in range(1,4):
print(i ,'')
time.sleep(1)
print('writing to ' , filename_frames,' and ', filename_keys, ' ....')
print('training now...(press Q to stop)')
fps=0
training_frames=pd.DataFrame()
training_keys=pd.DataFrame()
while True:
t=time.time()
intsarray,height,width=get_screen(pos,win32gui, win32ui, win32con, win32api)
img=process_img(intsarray,height,width,np,cv2)
cv2.imshow('Training',img)
img=img.flatten()
fps+=time.time()-t
key = get_keys(win32api)
if key==0:
cv2.destroyAllWindows()
break;
training_frames=training_frames.append([img])
training_keys= training_keys.append([key])
key = cv2.waitKey(1)
if key == 27:
cv2.destroyAllWindows()
break;
print('\nfps: ',len(training_frames)/fps)
print('no of frames trained: ', len(training_frames))
#discarding some of the frames
training_frames=training_frames[10:len(training_frames)-10]
training_keys=training_keys[10:len(training_keys)-10]
training_frames.to_csv(filename_frames,index=False)
training_keys.to_csv(filename_keys,index=False,header=['w','s','a','d'])
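# move() maps the model's 4-way output onto the w/s/a/d controls: the most
# probable class is selected and dispatched to the corresponding helper
# imported from lib.directions.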
def move(y):
maxi=0
y=y.flatten()
for i in range(0,len(y)):
if(y[i]>y[maxi]):maxi=i
print(round(y[i],2),end=',')
    arr=['w','s','a','d']  # a separate 'no key pressed' class is not used here
print(arr[maxi])
if arr[maxi]=='w' : accelerate()
elif arr[maxi]=='s': deaccelerate()
elif arr[maxi]=='a': left()
elif arr[maxi]=='d': right()
def drive(pos):
model=load_model('model/model-0.h5')
for i in range(1,4):
print(i ,'')
time.sleep(1)
print('driving now...(press esc to stop)')
while True:
intsarray,height,width=get_screen(pos,win32gui, win32ui, win32con, win32api)
img=process_img(intsarray,height,width,np,cv2)
img=img.flatten()
img=np.array(img)/255
img.shape=(1,30,30,3)
y=model.predict(img)
move(y);
img.shape=(30,30,3)
cv2.imshow('Driving',img)
key = cv2.waitKey(1)
if key == 27:
cv2.destroyAllWindows()
break;
def get_pos():
pos=get_position(pag)
if pos==None:
print('loading cached frame location ...')
f=open('data/frames-pos.temp','r')
pos=eval(f.read())
f.close()
else:
f=open('data/frames-pos.temp','w+')
f.write(str(pos))
f.close()
return pos
def main():
if not os.path.isdir(os.path.dirname(os.path.abspath(__file__))+'/data'):
os.makedirs('data')
if not os.path.isdir(os.path.dirname(os.path.abspath(__file__))+'/model'):
os.makedirs('model')
while True:
pos=get_pos()
print('Frames will be captured at : ',pos)
        inp=int(input('Do you want to Train (0) or Drive (1)? Press 0 or 1. To exit press 2: '))
if(inp==0):train(pos)
if(inp==1):drive(pos)
if(inp==2):break
if __name__== "__main__":
main()
| mit |
peastman/deepchem | deepchem/metrics/metric.py | 2 | 27714 | import logging
from typing import Any, Callable, Optional
import numpy as np
logger = logging.getLogger(__name__)
def threshold_predictions(y: np.ndarray,
threshold: Optional[float] = None) -> np.ndarray:
"""Threshold predictions from classification model.
Parameters
----------
y: np.ndarray
Must have shape `(N, n_classes)` and be class probabilities.
threshold: float, default None
The threshold probability for the positive class. Note that this
threshold will only be applied for binary classifiers (where
`n_classes==2`). If specified for multiclass problems, will be
ignored. If `threshold` is None, and `n_classes==2` then a default
threshold of 0.5 will be applied.
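  Examples
  --------
  A small, hypothetical binary example; with the default threshold of 0.5 the
  positive-class probability in the second column decides the prediction.
  >>> import numpy as np
  >>> y = np.array([[0.9, 0.1], [0.4, 0.6]])
  >>> preds = threshold_predictions(y)
  >>> (preds == np.array([0.0, 1.0])).all()
  True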
Returns
-------
y_out: np.ndarray
A numpy array of shape `(N,)` with class predictions as integers ranging from 0
to `n_classes-1`.
"""
if not isinstance(y, np.ndarray) or not len(y.shape) == 2:
raise ValueError("y must be a ndarray of shape (N, n_classes)")
N = y.shape[0]
n_classes = y.shape[1]
if threshold is None and n_classes == 2:
logger.info("Using default threshold of 0.5 for binary dataset.")
threshold = 0.5
if not np.allclose(np.sum(y, axis=1), np.ones(N)):
raise ValueError(
"y must be a class probability matrix with rows summing to 1.")
if n_classes != 2:
return np.argmax(y, axis=1)
else:
return np.where(y[:, 1] >= threshold, np.ones(N), np.zeros(N))
def normalize_weight_shape(w: Optional[np.ndarray], n_samples: int,
n_tasks: int) -> np.ndarray:
"""A utility function to correct the shape of the weight array.
This utility function is used to normalize the shapes of a given
weight array.
Parameters
----------
w: np.ndarray
`w` can be `None` or a scalar or a `np.ndarray` of shape
`(n_samples,)` or of shape `(n_samples, n_tasks)`. If `w` is a
scalar, it's assumed to be the same weight for all samples/tasks.
n_samples: int
The number of samples in the dataset. If `w` is not None, we should
have `n_samples = w.shape[0]` if `w` is a ndarray
n_tasks: int
The number of tasks. If `w` is 2d ndarray, then we should have
`w.shape[1] == n_tasks`.
Examples
--------
>>> import numpy as np
>>> w_out = normalize_weight_shape(None, n_samples=10, n_tasks=1)
>>> (w_out == np.ones((10, 1))).all()
True
Returns
-------
w_out: np.ndarray
Array of shape `(n_samples, n_tasks)`
"""
if w is None:
w_out = np.ones((n_samples, n_tasks))
elif isinstance(w, np.ndarray):
if len(w.shape) == 0:
# scalar case
w_out = w * np.ones((n_samples, n_tasks))
elif len(w.shape) == 1:
if len(w) != n_samples:
raise ValueError("Length of w isn't n_samples")
# per-example case
# This is a little arcane but it repeats w across tasks.
w_out = np.tile(w, (n_tasks, 1)).T
elif len(w.shape) == 2:
if w.shape == (n_samples, 1):
# If w.shape == (n_samples, 1) handle it as 1D
w = np.squeeze(w, axis=1)
w_out = np.tile(w, (n_tasks, 1)).T
elif w.shape != (n_samples, n_tasks):
raise ValueError("Shape for w doens't match (n_samples, n_tasks)")
else:
# w.shape == (n_samples, n_tasks)
w_out = w
else:
raise ValueError("w must be of dimension 1, 2, or 3")
else:
# scalar case
w_out = w * np.ones((n_samples, n_tasks))
return w_out
def normalize_labels_shape(y: np.ndarray,
mode: Optional[str] = None,
n_tasks: Optional[int] = None,
n_classes: Optional[int] = None) -> np.ndarray:
"""A utility function to correct the shape of the labels.
Parameters
----------
y: np.ndarray
`y` is an array of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)`.
mode: str, default None
If `mode` is "classification" or "regression", attempts to apply
data transformations.
n_tasks: int, default None
The number of tasks this class is expected to handle.
n_classes: int, default None
If specified use this as the number of classes. Else will try to
impute it as `n_classes = max(y) + 1` for arrays and as
`n_classes=2` for the case of scalars. Note this parameter only
has value if `mode=="classification"`
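  Examples
  --------
  A minimal sketch for single-task binary labels (values chosen for
  illustration only).
  >>> import numpy as np
  >>> y = np.array([0, 1, 1])
  >>> y_out = normalize_labels_shape(y, mode="classification", n_tasks=1, n_classes=2)
  >>> y_out.shape
  (3, 1, 2)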
Returns
-------
y_out: np.ndarray
If `mode=="classification"`, `y_out` is an array of shape `(N,
n_tasks, n_classes)`. If `mode=="regression"`, `y_out` is an array
of shape `(N, n_tasks)`.
"""
if n_tasks is None:
raise ValueError("n_tasks must be specified")
if mode not in ["classification", "regression"]:
raise ValueError("mode must be either classification or regression.")
if mode == "classification" and n_classes is None:
raise ValueError("n_classes must be specified")
if not isinstance(y, np.ndarray):
raise ValueError("y must be a np.ndarray")
# Handle n_classes/n_task shape ambiguity
if mode == "classification" and len(y.shape) == 2:
if n_classes == y.shape[1] and n_tasks != 1 and n_classes != n_tasks:
raise ValueError("Shape of input doesn't match expected n_tasks=1")
elif n_classes == y.shape[1] and n_tasks == 1:
# Add in task dimension
y = np.expand_dims(y, 1)
if len(y.shape) == 1 and n_tasks != 1:
raise ValueError("n_tasks must equal 1 for a 1D set of labels.")
if (len(y.shape) == 2 or len(y.shape) == 3) and n_tasks != y.shape[1]:
raise ValueError(
"Shape of input doesn't match expected n_tasks=%d" % n_tasks)
if len(y.shape) >= 4:
raise ValueError(
"Labels y must be a float scalar or a ndarray of shape `(N,)` or "
"`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems and "
"of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)` for classification problems"
)
if len(y.shape) == 1:
    # Insert a task dimension (we know n_tasks=1 from above)
y_out = np.expand_dims(y, 1)
elif len(y.shape) == 2:
y_out = y
elif len(y.shape) == 3:
# If 3D and last dimension isn't 1, assume this is one-hot encoded and return as-is.
if y.shape[-1] != 1:
return y
y_out = np.squeeze(y, axis=-1)
# Handle classification. We need to convert labels into one-hot representation.
if mode == "classification":
all_y_task = []
for task in range(n_tasks):
y_task = y_out[:, task]
# check whether n_classes is int or not
assert isinstance(n_classes, int)
y_hot = to_one_hot(y_task, n_classes=n_classes)
y_hot = np.expand_dims(y_hot, 1)
all_y_task.append(y_hot)
y_out = np.concatenate(all_y_task, axis=1)
return y_out
def normalize_prediction_shape(y: np.ndarray,
mode: Optional[str] = None,
n_tasks: Optional[int] = None,
n_classes: Optional[int] = None):
"""A utility function to correct the shape of provided predictions.
The metric computation classes expect that inputs for classification
have the uniform shape `(N, n_tasks, n_classes)` and inputs for
regression have the uniform shape `(N, n_tasks)`. This function
normalizes the provided input array to have the desired shape.
Examples
--------
>>> import numpy as np
>>> y = np.random.rand(10)
>>> y_out = normalize_prediction_shape(y, "regression", n_tasks=1)
>>> y_out.shape
(10, 1)
Parameters
----------
y: np.ndarray
If `mode=="classification"`, `y` is an array of shape `(N,)` or
`(N, n_tasks)` or `(N, n_tasks, n_classes)`. If
`mode=="regression"`, `y` is an array of shape `(N,)` or `(N,
n_tasks)` or `(N, n_tasks, 1)`.
mode: str, default None
If `mode` is "classification" or "regression", attempts to apply
data transformations.
n_tasks: int, default None
The number of tasks this class is expected to handle.
n_classes: int, default None
The number of classes. Required when `mode=="classification"`; ignored
when `mode=="regression"`.
Returns
-------
y_out: np.ndarray
If `mode=="classification"`, `y_out` is an array of shape `(N,
n_tasks, n_classes)`. If `mode=="regression"`, `y_out` is an array
of shape `(N, n_tasks)`.
"""
if n_tasks is None:
raise ValueError("n_tasks must be specified")
if mode == "classification" and n_classes is None:
raise ValueError("n_classes must be specified")
if not isinstance(y, np.ndarray):
raise ValueError("y must be a np.ndarray")
# Handle n_classes/n_tasks shape ambiguity
if mode == "classification" and len(y.shape) == 2:
if n_classes == y.shape[1] and n_tasks != 1 and n_classes != n_tasks:
raise ValueError("Shape of input doesn't match expected n_tasks=1")
elif n_classes == y.shape[1] and n_tasks == 1:
# Add in task dimension
y = np.expand_dims(y, 1)
if (len(y.shape) == 2 or len(y.shape) == 3) and n_tasks != y.shape[1]:
raise ValueError(
"Shape of input doesn't match expected n_tasks=%d" % n_tasks)
if len(y.shape) >= 4:
raise ValueError(
"Predictions y must be a float scalar or a ndarray of shape `(N,)` or "
"`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems and "
"of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, n_classes)` for classification problems"
)
if mode == "classification":
if n_classes is None:
raise ValueError("n_classes must be specified.")
if len(y.shape) == 1 or len(y.shape) == 2:
# Make everything 2D so easy to handle
if len(y.shape) == 1:
y = y[:, np.newaxis]
# Handle each task separately.
all_y_task = []
for task in range(n_tasks):
y_task = y[:, task]
if len(np.unique(y_task)) > n_classes:
# Handle continuous class probabilities of the positive class for binary
if n_classes > 2:
raise ValueError(
"Cannot handle continuous probabilities for multiclass problems."
"Need a per-class probability")
# Fill in class 0 probabilities
y_task = np.array([1 - y_task, y_task]).T
# Add a task dimension to concatenate on
y_task = np.expand_dims(y_task, 1)
all_y_task.append(y_task)
else:
# Handle binary labels
# make y_hot of shape (N, n_classes)
y_task = to_one_hot(y_task, n_classes=n_classes)
# Add a task dimension to concatenate on
y_task = np.expand_dims(y_task, 1)
all_y_task.append(y_task)
y_out = np.concatenate(all_y_task, axis=1)
elif len(y.shape) == 3:
y_out = y
elif mode == "regression":
if len(y.shape) == 1:
# Insert a task dimension
y_out = np.expand_dims(y, 1)
elif len(y.shape) == 2:
y_out = y
elif len(y.shape) == 3:
if y.shape[-1] != 1:
raise ValueError(
"y must be a float scalar or a ndarray of shape `(N,)` or "
"`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems.")
y_out = np.squeeze(y, axis=-1)
else:
raise ValueError("mode must be either classification or regression.")
return y_out
def handle_classification_mode(
y: np.ndarray,
classification_handling_mode: Optional[str] = None,
threshold_value: Optional[float] = None) -> np.ndarray:
"""Handle classification mode.
Transform predictions so that they have the correct classification mode.
Parameters
----------
y: np.ndarray
Must be of shape `(N, n_tasks, n_classes)`
classification_handling_mode: str, default None
DeepChem models by default predict class probabilities for
classification problems. This means that for a given singletask
prediction, after shape normalization, the DeepChem prediction will be a
numpy array of shape `(N, n_classes)` with class probabilities.
`classification_handling_mode` is a string that instructs this method
how to handle transforming these probabilities. It can take on the
following values:
- None: default value. Pass in `y_pred` directly into `self.metric`.
- "threshold": Use `threshold_predictions` to threshold `y_pred`. Use
`threshold_value` as the desired threshold.
- "threshold-one-hot": Use `threshold_predictions` to threshold `y_pred`
using `threshold_values`, then apply `to_one_hot` to output.
threshold_value: float, default None
If set, and `classification_handling_mode` is "threshold" or
"threshold-one-hot" apply a thresholding operation to values with this
threshold. This option is only sensible on binary classification tasks.
If float, this will be applied as a binary classification value.
Returns
-------
y_out: np.ndarray
If `classification_handling_mode` is None, then of shape `(N, n_tasks, n_classes)`.
If `classification_handling_mode` is "threshold", then of shape `(N, n_tasks)`.
If `classification_handling_mode` is "threshold-one-hot", then of shape `(N, n_tasks, n_classes)`.
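Examples
--------
A minimal sketch of the pass-through behavior of the default mode:
>>> import numpy as np
>>> y = np.random.rand(5, 1, 2)
>>> handle_classification_mode(y, None).shape
(5, 1, 2)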
"""
if len(y.shape) != 3:
raise ValueError("y must be of shape (N, n_tasks, n_classes)")
N, n_tasks, n_classes = y.shape
if classification_handling_mode is None:
return y
elif classification_handling_mode == "threshold":
thresholded = []
for task in range(n_tasks):
task_array = y[:, task, :]
# Now of shape (N,)
task_array = threshold_predictions(task_array, threshold_value)
# Now of shape (N, 1)
task_array = np.expand_dims(task_array, 1)
thresholded.append(task_array)
# Returns shape (N, n_tasks)
return np.concatenate(thresholded, axis=1)
elif classification_handling_mode == "threshold-one-hot":
thresholded = []
for task in range(n_tasks):
task_array = y[:, task, :]
# Now of shape (N,)
task_array = threshold_predictions(task_array, threshold_value)
# Now of shape (N, n_classes)
task_array = to_one_hot(task_array, n_classes=n_classes)
# Now of shape (N, 1, n_classes)
task_array = np.expand_dims(task_array, 1)
thresholded.append(task_array)
# Returns shape (N, n_tasks, n_classes)
return np.concatenate(thresholded, axis=1)
else:
raise ValueError(
"classification_handling_mode must be one of None, threshold, threshold-one-hot"
)
def to_one_hot(y: np.ndarray, n_classes: int = 2) -> np.ndarray:
"""Transforms label vector into one-hot encoding.
Turns y into vector of shape `(N, n_classes)` with a one-hot
encoding. Assumes that `y` takes values from `0` to `n_classes - 1`.
Parameters
----------
y: np.ndarray
A vector of shape `(N,)` or `(N, 1)`
n_classes: int, default 2
The number of classes used for the one-hot encoding. `y` is assumed to
contain integer class labels in the range `[0, n_classes - 1]`.
Returns
-------
np.ndarray
A numpy array of shape `(N, n_classes)`.
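Examples
--------
A minimal sketch with binary labels:
>>> import numpy as np
>>> to_one_hot(np.array([0, 1, 1]), n_classes=2)
array([[1., 0.],
       [0., 1.],
       [0., 1.]])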
"""
if len(y.shape) > 2:
raise ValueError("y must be a vector of shape (N,) or (N, 1)")
if len(y.shape) == 2 and y.shape[1] != 1:
raise ValueError("y must be a vector of shape (N,) or (N, 1)")
if len(np.unique(y)) > n_classes:
raise ValueError("y has more than n_class unique elements.")
N = np.shape(y)[0]
y_hot = np.zeros((N, n_classes))
# Flatten the labels so that both (N,) and (N, 1) inputs index correctly
y_hot[np.arange(N), y.astype(np.int64).ravel()] = 1
return y_hot
def from_one_hot(y: np.ndarray, axis: int = 1) -> np.ndarray:
"""Transforms label vector from one-hot encoding.
Parameters
----------
y: np.ndarray
A vector of shape `(n_samples, num_classes)`
axis: int, optional (default 1)
The axis with one-hot encodings to reduce on.
Returns
-------
np.ndarray
A numpy array of shape `(n_samples,)`
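Examples
--------
A minimal sketch inverting a one-hot encoding:
>>> import numpy as np
>>> from_one_hot(np.array([[1., 0.], [0., 1.], [0., 1.]]))
array([0, 1, 1])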
"""
return np.argmax(y, axis=axis)
class Metric(object):
"""Wrapper class for computing user-defined metrics.
The `Metric` class provides a wrapper for standardizing the API
around different classes of metrics that may be useful for DeepChem
models. The implementation provides a few non-standard conveniences
such as built-in support for multitask and multiclass metrics.
This class aims to support a variety of metrics: classification and
regression metrics that compare scalar values are supported.
At present, this class doesn't support metric computation on models
which don't present scalar outputs. For example, if you have a
generative model which predicts images or molecules, you will need
to write a custom evaluation and metric setup.
"""
def __init__(self,
metric: Callable[..., float],
task_averager: Optional[Callable[..., Any]] = None,
name: Optional[str] = None,
threshold: Optional[float] = None,
mode: Optional[str] = None,
n_tasks: Optional[int] = None,
classification_handling_mode: Optional[str] = None,
threshold_value: Optional[float] = None):
"""
Parameters
----------
metric: function
Function that takes args y_true, y_pred (in that order) and
computes desired score. If sample weights are to be considered,
`metric` may take in an additional keyword argument
`sample_weight`.
task_averager: function, default None
If not None, should be a function that averages metrics across
tasks.
name: str, default None
Name of this metric
threshold: float, default None (DEPRECATED)
Used for binary metrics and is the threshold for the positive
class.
mode: str, default None
Should usually be "classification" or "regression."
n_tasks: int, default None
The number of tasks this class is expected to handle.
classification_handling_mode: str, default None
DeepChem models by default predict class probabilities for
classification problems. This means that for a given singletask
prediction, after shape normalization, the DeepChem prediction will be a
numpy array of shape `(N, n_classes)` with class probabilities.
`classification_handling_mode` is a string that instructs this method
how to handle transforming these probabilities. It can take on the
following values:
- None: default value. Pass in `y_pred` directly into `self.metric`.
- "threshold": Use `threshold_predictions` to threshold `y_pred`. Use
`threshold_value` as the desired threshold.
- "threshold-one-hot": Use `threshold_predictions` to threshold `y_pred`
using `threshold_values`, then apply `to_one_hot` to output.
threshold_value: float, default None
If set, and `classification_handling_mode` is "threshold" or
"threshold-one-hot" apply a thresholding operation to values with this
threshold. This option is only sensible on binary classification tasks.
If float, this will be applied as a binary classification value.
"""
if threshold is not None:
logger.warning(
"threshold is deprecated and will be removed in a future version of DeepChem. "
"Set threshold in compute_metric instead.")
self.metric = metric
if task_averager is None:
self.task_averager = np.mean
else:
self.task_averager = task_averager
if name is None:
if task_averager is None:
if hasattr(self.metric, '__name__'):
self.name = self.metric.__name__
else:
self.name = "unknown metric"
else:
if hasattr(self.metric, '__name__'):
self.name = task_averager.__name__ + "-" + self.metric.__name__
else:
self.name = "unknown metric"
else:
self.name = name
if mode is None:
# These are some smart defaults
if self.metric.__name__ in [
"roc_auc_score", "matthews_corrcoef", "recall_score",
"accuracy_score", "kappa_score", "cohen_kappa_score",
"precision_score", "balanced_accuracy_score", "prc_auc_score",
"f1_score", "bedroc_score", "jaccard_score", "jaccard_index",
"pixel_error"
]:
mode = "classification"
# These are some smart defaults corresponding to sklearn's required
# behavior
if classification_handling_mode is None:
if self.metric.__name__ in [
"matthews_corrcoef", "cohen_kappa_score", "kappa_score",
"balanced_accuracy_score", "recall_score", "jaccard_score",
"jaccard_index", "pixel_error", "f1_score"
]:
classification_handling_mode = "threshold"
elif self.metric.__name__ in [
"accuracy_score", "precision_score", "bedroc_score"
]:
classification_handling_mode = "threshold-one-hot"
elif self.metric.__name__ in ["roc_auc_score", "prc_auc_score"]:
classification_handling_mode = None
elif self.metric.__name__ in [
"pearson_r2_score", "r2_score", "mean_squared_error",
"mean_absolute_error", "rms_score", "mae_score", "pearsonr",
"concordance_index"
]:
mode = "regression"
else:
raise ValueError(
"Please specify the mode of this metric. mode must be 'regression' or 'classification'"
)
self.mode = mode
self.n_tasks = n_tasks
if classification_handling_mode not in [
None, "threshold", "threshold-one-hot"
]:
raise ValueError(
"classification_handling_mode must be one of None, 'threshold', 'threshold_one_hot'"
)
self.classification_handling_mode = classification_handling_mode
self.threshold_value = threshold_value
def compute_metric(self,
y_true: np.ndarray,
y_pred: np.ndarray,
w: Optional[np.ndarray] = None,
n_tasks: Optional[int] = None,
n_classes: int = 2,
per_task_metrics: bool = False,
use_sample_weights: bool = False,
**kwargs) -> Any:
"""Compute a performance metric for each task.
Parameters
----------
y_true: np.ndarray
An np.ndarray containing true values for each task. Must be of shape
`(N,)` or `(N, n_tasks)` or `(N, n_tasks, n_classes)` if a
classification metric. If of shape `(N, n_tasks)` values can either be
class-labels or probabilities of the positive class for binary
classification problems. If a regression problem, must be of shape
`(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)` if a regression metric.
y_pred: np.ndarray
An np.ndarray containing predicted values for each task. Must be
of shape `(N, n_tasks, n_classes)` if a classification metric,
else must be of shape `(N, n_tasks)` if a regression metric.
w: np.ndarray, default None
An np.ndarray containing weights for each datapoint. If
specified, must be of shape `(N, n_tasks)`.
n_tasks: int, default None
The number of tasks this class is expected to handle.
n_classes: int, default 2
Number of classes in data for classification tasks.
per_task_metrics: bool, default False
If true, return computed metric for each task on multitask dataset.
use_sample_weights: bool, default False
If set, use per-sample weights `w`.
kwargs: dict
Will be passed on to self.metric
Returns
-------
np.ndarray
A numpy array containing metric values for each task.
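Examples
--------
A minimal sketch for a singletask regression metric (assumes
scikit-learn's ``r2_score`` is importable):
>>> import numpy as np
>>> from sklearn.metrics import r2_score
>>> metric = Metric(r2_score, mode="regression", n_tasks=1)
>>> y = np.array([1.0, 2.0, 3.0])
>>> float(metric.compute_metric(y, y))
1.0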
"""
# Attempt some limited shape imputation to find n_tasks
if n_tasks is None:
if self.n_tasks is None and isinstance(y_true, np.ndarray):
if len(y_true.shape) == 1:
n_tasks = 1
elif len(y_true.shape) >= 2:
n_tasks = y_true.shape[1]
else:
n_tasks = self.n_tasks
# check whether n_tasks is int or not
# This is because `normalize_weight_shape` requires an int value.
assert isinstance(n_tasks, int)
y_true = normalize_labels_shape(
y_true, mode=self.mode, n_tasks=n_tasks, n_classes=n_classes)
y_pred = normalize_prediction_shape(
y_pred, mode=self.mode, n_tasks=n_tasks, n_classes=n_classes)
if self.mode == "classification":
y_true = handle_classification_mode(
y_true, self.classification_handling_mode, self.threshold_value)
y_pred = handle_classification_mode(
y_pred, self.classification_handling_mode, self.threshold_value)
n_samples = y_true.shape[0]
w = normalize_weight_shape(w, n_samples, n_tasks)
computed_metrics = []
for task in range(n_tasks):
y_task = y_true[:, task]
y_pred_task = y_pred[:, task]
w_task = w[:, task]
metric_value = self.compute_singletask_metric(
y_task,
y_pred_task,
w_task,
use_sample_weights=use_sample_weights,
**kwargs)
computed_metrics.append(metric_value)
logger.info("computed_metrics: %s" % str(computed_metrics))
if n_tasks == 1:
# FIXME: Incompatible types in assignment
computed_metrics = computed_metrics[0] # type: ignore
if not per_task_metrics:
return self.task_averager(computed_metrics)
else:
return self.task_averager(computed_metrics), computed_metrics
def compute_singletask_metric(self,
y_true: np.ndarray,
y_pred: np.ndarray,
w: Optional[np.ndarray] = None,
n_samples: Optional[int] = None,
use_sample_weights: bool = False,
**kwargs) -> float:
"""Compute a metric value.
Parameters
----------
y_true: `np.ndarray`
True values array. This array must be of shape `(N,
n_classes)` if classification and `(N,)` if regression.
y_pred: `np.ndarray`
Predictions array. This array must be of shape `(N, n_classes)`
if classification and `(N,)` if regression.
w: `np.ndarray`, default None
Sample weight array. This array must be of shape `(N,)`
n_samples: int, default None (DEPRECATED)
The number of samples in the dataset. This is `N`. This argument is
ignored.
use_sample_weights: bool, default False
If set, use per-sample weights `w`.
kwargs: dict
Will be passed on to self.metric
Returns
-------
metric_value: float
The computed value of the metric.
"""
if n_samples is not None:
logger.warning("n_samples is a deprecated argument which is ignored.")
# Validate that y_true and y_pred have shapes consistent with the mode
if self.mode == "regression":
if len(y_true.shape) != 1 or len(
y_pred.shape) != 1 or len(y_true) != len(y_pred):
raise ValueError(
"For regression metrics, y_true and y_pred must both be of shape (N,)"
)
elif self.mode == "classification":
pass
# if len(y_true.shape) != 2 or len(y_pred.shape) != 2 or y_true.shape != y_pred.shape:
# raise ValueError("For classification metrics, y_true and y_pred must both be of shape (N, n_classes)")
else:
raise ValueError(
"Only classification and regression are supported for metrics calculations."
)
if use_sample_weights:
metric_value = self.metric(y_true, y_pred, sample_weight=w, **kwargs)
else:
metric_value = self.metric(y_true, y_pred, **kwargs)
return metric_value
| mit |
tawsifkhan/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup; this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
# CV folds and fits a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly like the
# other linear models.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators that have a max_iter
# attribute report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
plissonf/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
blueburningcoder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to its parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer classes automatically adjust the relative positions of
their children, which should be instances of the OffsetBox. This is
used to align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Given a list of (width, xdescent) of each box, calculate the
total width and the x-offset positions of each item according to
*mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Given a list of (height, descent) of each box, align the boxes
with *align* and calculate the y-offsets of each box.
*hd_list* : list of (height, ydescent) of boxes to be aligned.
*height* : Intended total height. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
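# A minimal usage sketch of the "center" alignment (values worked out by hand):
# _get_aligned_offsets([(1, 0), (3, 0)], height=None, align="center")
# returns (3, 0.0, [1.0, 0.0]): the box of height 1 is offset by 1.0 so that it
# is centered within the total height of 3.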
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to its parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
Update the offsets of the children and return the extent of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
Update the offsets of the children and return the extent of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
height = self.height - 2*self.pad # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
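# A minimal usage sketch (hypothetical names): two TextArea children stacked
# vertically; the packed box is then positioned via set_offset() and drawn by
# its parent artist.
# line1 = TextArea("first line")
# line2 = TextArea("second line")
# box = VPacker(children=[line1, line2], pad=2, sep=4, align="left")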
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
The TextArea contains a single Text instance. The text is
placed at (0,0) with baseline+left alignment. The width and height
of the TextArea instance are the width and height of its child
text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
Set multilinebaseline .
If True, baseline for multiline text is
adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
## to have a minimum descent, i.e., "l" and "p" have the same
## descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| agpl-3.0 |
chiffa/numpy | numpy/fft/fftpack.py | 5 | 45622 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a).astype(complex, copy=False)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
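# A small consistency sketch of the "ortho" normalization: both fft and ifft
# are scaled by 1/sqrt(n), so a round trip is still the identity, e.g.
# np.allclose(ifft(fft(x, norm="ortho"), norm="ortho"), x) holds for any
# 1-D array x.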
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
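# Editor's illustration (not part of the original module): the Notes for
# `irfft` above describe Fourier-domain resampling via ``irfft(rfft(a), m)``.
# A minimal sketch of that recipe; the helper name is hypothetical and the
# function is illustrative only.
def _example_fourier_resample(signal, m):
    """Resample a real 1-D sequence to ``m`` points through its spectrum."""
    spectrum = rfft(signal)    # one-sided spectrum of the original series
    return irfft(spectrum, m)  # inverse transform at the new length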
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
    See Also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
    See Also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
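# Helper used by the n-dimensional transforms below: normalize the ``s``
# (shape) and ``axes`` arguments.  When ``s`` is omitted it is taken from the
# input shape along ``axes`` (or the full shape), and for the inverse real
# transforms (``invreal``) the implied length of the last axis is expanded to
# ``2*(m - 1)``.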
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
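# Helper: apply the 1-D transform ``function`` successively over each axis in
# ``axes`` (last listed axis first); all of the n-dimensional transforms below
# are built from the 1-D routines this way.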
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
    Compute the 2-dimensional discrete Fourier Transform.
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
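# Editor's illustration (not part of the original module): as the Notes for
# ``rfftn`` state, the real transform runs over the last axis and complex
# transforms over the rest.  A minimal check of that decomposition for a 2-D
# real array; the helper name is hypothetical and the tolerance is intended
# for modest-sized inputs.
def _example_rfftn_decomposition(a):
    """Return True if ``rfftn(a)`` matches ``fft`` over axis 0 of ``rfft``
    over the last axis, for a 2-D real array ``a``."""
    a = asarray(a, dtype=float)
    direct = rfftn(a)
    by_hand = fft(rfft(a, axis=-1), axis=0)
    return bool((abs(direct - by_hand) < 1e-10 * (1 + abs(direct).max())).all())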
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
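# Editor's illustration (not part of the original module): ``irfftn`` needs
# the original shape to recover odd last-axis lengths, hence the ``a.shape``
# argument mentioned in the docstring above.  A minimal round-trip sketch;
# the helper name is hypothetical.
def _example_irfftn_roundtrip(a):
    """Return True if ``irfftn(rfftn(a), a.shape)`` reproduces ``a``."""
    a = asarray(a, dtype=float)
    recovered = irfftn(rfftn(a), s=a.shape)
    return bool((abs(recovered - a) < 1e-10 * (1 + abs(a).max())).all())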
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
NASLab/GroundROS | src/experimental_results/path_planning_analysis.py | 1 | 5974 | # python experimental tests for Husky
from numpy import sin, cos, pi, load
import matplotlib.pyplot as plt
yaw_bound = 2 * pi / 180
yaw_calibrate = pi / 180 * (0)
x_offset_calibrate = 0
y_offset_calibrate = -.08
f0 = plt.figure()
ax0 = f0.add_subplot(111)
env_data = load('env.npy')[1:]
x = [[]] * len(env_data)
y = [[]] * len(env_data)
m = 2
for i in range(m, len(env_data) - m):
if len(env_data[i]) > 0:
x[i] = env_data[i][0]
y[i] = env_data[i][1]
yaw = env_data[i][2]
# filter some of the readings; comment to see the effect
if len(env_data[i + m]) == 0 or abs(yaw - env_data[i - m][2]) > yaw_bound or abs(yaw - env_data[i + m][2]) > yaw_bound:
continue
readings = env_data[i][3]
readings_x = [[]] * len(readings)
readings_y = [[]] * len(readings)
k = 0
for j in range(len(readings)):
# lidar readings in lidar frame
x_temp = readings[j][0] * cos(-readings[j][1])
y_temp = readings[j][0] * sin(-readings[j][1])
# lidar readings in robot frame
x_temp2 = x_temp * \
cos(yaw_calibrate) - y_temp * \
sin(yaw_calibrate) + x_offset_calibrate
y_temp2 = y_temp * \
cos(yaw_calibrate) + x_temp * \
sin(yaw_calibrate) + y_offset_calibrate
# lidar readings in global frame
readings_x[k] = x_temp2 * cos(yaw) - y_temp2 * sin(yaw) + x[i]
readings_y[k] = y_temp2 * cos(yaw) + x_temp2 * sin(yaw) + y[i]
k += 1
ax0.plot(readings_x, readings_y, 'r.')
ax0.plot([], [], 'r.', label='Lidar Reading')
env_data = load('planner_of_agent_0.npy')[1:]
x = [[]] * len(env_data)
y = [[]] * len(env_data)
for i in range(1, len(env_data) - 1):
if len(env_data[i]) > 0:
x[i] = env_data[i][0]
y[i] = env_data[i][1]
ax0.plot([value for value in x if value],
         [value for value in y if value], 'go', lw=3, label='Agent 0 Trajectory')
env_data = load('planner_of_agent_1.npy')[1:]
x = [[]] * len(env_data)
y = [[]] * len(env_data)
for i in range(1, len(env_data) - 1):
if len(env_data[i]) > 0:
x[i] = env_data[i][0]
y[i] = env_data[i][1]
ax0.plot([value for value in x if value],
         [value for value in y if value], 'bo', lw=3, label='Agent 1 Trajectory')
ax0.legend()
ax0.axis('equal')
plt.draw()
plt.pause(.1)
raw_input("<Hit Enter To Close>")
plt.close(f0)
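# Editor's illustration (not part of the original script): the nested loop
# above chains two planar rigid-body transforms, lidar -> robot -> global.
# The same chain written with rotation matrices; the helper name and the use
# of numpy as np are assumptions of this sketch, and the function is not
# called by the script.
import numpy as np
def lidar_to_global(r, bearing, yaw, robot_xy):
    """Transform one lidar (range, bearing) reading into the global frame."""
    def rot(theta):
        return np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])
    p_lidar = np.array([r * np.cos(-bearing), r * np.sin(-bearing)])
    offset = np.array([x_offset_calibrate, y_offset_calibrate])
    p_robot = rot(yaw_calibrate).dot(p_lidar) + offset       # lidar -> robot
    return rot(yaw).dot(p_robot) + np.asarray(robot_xy)      # robot -> global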
# yaw_bound = 3 * pi / 180
# yaw_calibrate = pi / 180 * (0)
# x_offset_calibrate = .23
# y_offset_calibrate = -.08
# data = np.load('pos.npy')[1:]
# print len(data)
# error_long = data[:, 0]
# error_lat = data[:, 1]
# ref_x = [value for value in data[:, 2]]
# print ref_x[:30]
# ref_y = [value for value in data[:, 3]]
# pos_x = [value for value in data[:, 4]][0::1]
# pos_y = [value for value in data[:, 5]][0::1]
# pos_theta = data[:, 6]
# print data
# time = data[:, 7] - data[0, 7]
# vel = data[:, 8]
# plt.plot(ref_x, ref_y, 'ro')
# plt.gca().set_aspect('equal', adjustable='box')
# f0 = plt.figure(1, figsize=(9, 9))
# ax0 = f0.add_subplot(111)
# ax0.plot(ref_x, ref_y, '--', lw=3, label='Reference Trajectory')
# ax0.plot(pos_x[0], pos_y[0], 'ms', markersize=10, label='Start Point')
# ax0.plot(pos_x, pos_y, 'go', label='Robot Trajectory')
# env_data = np.load('planner_of_agent_0.npy')[1:]
# x = [[]] * len(env_data)
# y = [[]] * len(env_data)
# print len(env_data)
# for i in range(1, len(env_data) - 1):
# if len(env_data[i]) > 0:
# x[i] = env_data[i][0]
# y[i] = env_data[i][1]
# yaw = env_data[i][2]
# filter some of the readings; comment to see the effect
# if len(env_data[i + 1]) == 0 or abs(yaw - env_data[i - 1][2]) > yaw_bound or abs(yaw - env_data[i + 1][2]) > yaw_bound:
# continue
# readings = env_data[i][3]
# readings_x = [[]] * len(readings)
# readings_y = [[]] * len(readings)
# k = 0
# for j in range(len(readings)):
# lidar readings in lidar frame
# x_temp = readings[j][0] * cos(-readings[j][1])
# y_temp = readings[j][0] * sin(-readings[j][1])
# lidar readings in robot frame
# x_temp2 = x_temp * \
# cos(yaw_calibrate) - y_temp * \
# sin(yaw_calibrate) + x_offset_calibrate
# y_temp2 = y_temp * \
# cos(yaw_calibrate) + x_temp * \
# sin(yaw_calibrate) + y_offset_calibrate
# lidar readings in global frame
# readings_x[k] = x_temp2 * cos(yaw) - y_temp2 * sin(yaw) + x[i]
# readings_y[k] = y_temp2 * cos(yaw) + x_temp2 * sin(yaw) + y[i]
# k += 1
# ax0.plot(readings_x, readings_y, 'r.')
# for i in range(len(env_data)):
# if len(env_data[i])>0:
# x[i] = env_data[i][0]
# y[i] = env_data[i][1]
# yaw = env_data[i][2]
# print yaw
# readings = env_data[i][3]
# readings_x = [[]]*len(readings)
# readings_y = [[]]*len(readings)
# print len(readings),len(readings_x)
# k=0
# for j in range(len(readings)):
# if i<200:
# print k,j,len(readings_x)
# readings_x[k] = x[i] + readings[j][0]*sin(pi/2-yaw+readings[j][1])
# readings_y[k] = y[i] + readings[j][0]*cos(pi/2-yaw+readings[j][1])
# k+=1
# ax0.plot(readings_x, readings_y,'r.')
# ax0.plot([], [], 'r.', label='Lidar Reading')
# print x
# ax0.plot([value for value in x if value],
# [value for value in y if value], 'go', lw=3,label='Robot\'s Trajectory')
# env_y = np.load('env.npy')[1]
# env_x = [value for value in env_x if value]
# env_y = [value for value in env_y if value]
# ax0.plot(env_x, env_y, 'r.', )
# ax0.plot(-.5, 2.7, 'cs', markersize=10, label='Destination')
# ax0.legend()
# ax0.axis('equal')
# ax0.set_xlim(-3.5, 3.5)
# ax0.set_ylim(-3, 4)
# ax0.set_xlabel('X (m)')
# ax0.set_ylabel('Y (m)')
# ax0.axis('equal')
# plt.tight_layout()
# plt.draw()
# plt.pause(.1) # <-------
# raw_input("<Hit Enter To Close>")
# plt.close(f0)
| mit |
phobson/pybmp | pybmpdb/summary.py | 2 | 21365 | import os
import numpy
import matplotlib
from matplotlib import pyplot
import seaborn
from statsmodels.tools.decorators import (
resettable_cache, cache_readonly
)
import wqio
from . import bmpdb, utils
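# Drop results from any BMP that has fewer than ``count`` samples at this
# location, and flag the location for inclusion only if at least ``count``
# distinct BMPs survive the cut.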
def filterlocation(location, count=5, column='bmp'):
location.filtered_data = (
location.filtered_data
.groupby(level=column)
.filter(lambda g: g.count() >= count)
)
location.include = (
location.filtered_data
.index
.get_level_values(column)
.unique()
.shape[0]
) >= count
class DatasetSummary(object):
def __init__(self, dataset, paramgroup, figpath, forcepaths=False):
self.forcepaths = forcepaths
self.figpath = figpath
self.paramgroup = paramgroup
self.ds = dataset
self.parameter = self.ds.definition['parameter']
self.parameter.usingTex = True
self.bmp = self.ds.definition['category']
# properties
self._latex_file_name = None
self._scatter_fig_path = None
self._scatter_fig_name = None
self._stat_fig_path = None
self._stat_fig_name = None
@property
def latex_file_name(self):
if self._latex_file_name is None:
self._latex_file_name = utils.processFilename('{}_{}_{}'.format(
self.paramgroup, self.bmp, self.parameter.name
)).lower()
return self._latex_file_name
@latex_file_name.setter
def latex_file_name(self, value):
self._latex_file_name = value
@property
def scatter_fig_path(self):
if self._scatter_fig_path is None:
self._scatter_fig_path = self.figpath + '/scatterplot'
if not os.path.exists(self._scatter_fig_path) and self.forcepaths:
os.mkdir(self._scatter_fig_path)
return self._scatter_fig_path
@property
def scatter_fig_name(self):
if self._scatter_fig_name is None:
figname = utils.processFilename('{}_scatter.pdf'.format(self.latex_file_name))
self._scatter_fig_name = self.scatter_fig_path + '/' + figname
return self._scatter_fig_name
@scatter_fig_name.setter
def scatter_fig_name(self, value):
self._scatter_fig_name = value
@property
def stat_fig_path(self):
if self._stat_fig_path is None:
self._stat_fig_path = self.figpath + '/statplot'
if not os.path.exists(self._stat_fig_path) and self.forcepaths:
os.mkdir(self._stat_fig_path)
return self._stat_fig_path
@property
def stat_fig_name(self):
if self._stat_fig_name is None:
figname = utils.processFilename('{}_stats.pdf'.format(self.latex_file_name))
self._stat_fig_name = self.stat_fig_path + '/' + figname
return self._stat_fig_name
@stat_fig_name.setter
def stat_fig_name(self, value):
self._stat_fig_name = value
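    # Render one row of the LaTeX summary table: either a single dataset-level
    # value spanning both columns (``fromdataset``) or an influent/effluent
    # pair, each formatted with ``wqio.utils.sigFigs``.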
def _tex_table_row(self, name, attribute, rule='mid', twoval=False,
sigfigs=3, ci=False, fromdataset=False, pval=False,
tex=False, forceint=False):
rulemap = {
'top': '\\toprule',
'mid': '\\midrule',
'bottom': '\\bottomrule',
'none': '%%',
}
try:
thisrule = rulemap[rule]
except KeyError:
raise KeyError('top, mid, bottom rules or none allowed')
if fromdataset:
if self.ds.effluent.include and self.ds.influent.include:
val = wqio.utils.sigFigs(getattr(self.ds, attribute), sigfigs,
pval=pval, tex=tex, forceint=forceint)
else:
val = 'NA'
formatter = dict(ruler=thisrule, name=name, value=val)
row = r"""
{ruler}
{name} & \multicolumn{{2}}{{c}} {{{value}}} \\"""
else:
valstrings = []
for loc in [self.ds.influent, self.ds.effluent]:
if loc.include:
if hasattr(attribute, 'append'):
val = [getattr(loc, attr)
for attr in attribute]
else:
val = getattr(loc, attribute)
if val is not None:
if twoval:
thisstring = '{}; {}'.format(
wqio.utils.sigFigs(val[0], sigfigs, pval=pval,
tex=tex, forceint=forceint),
wqio.utils.sigFigs(val[1], sigfigs, pval=pval,
tex=tex, forceint=forceint)
)
if ci:
thisstring = '({})'.format(thisstring)
else:
thisstring = wqio.utils.sigFigs(
val, sigfigs, pval=pval,
tex=tex, forceint=forceint
)
else:
thisstring = 'NA'
else:
thisstring = 'NA'
valstrings.append(thisstring)
formatter = dict(
ruler=thisrule,
name=name,
val_in=valstrings[0],
val_out=valstrings[1]
)
row = r"""
{ruler}
{name} & {val_in} & {val_out} \\"""
return row.format(**formatter)
def _make_tex_table(self, tabletitle):
'''
Generate a LaTeX table comparing the stats of `self.influent`
and `self.effluent`.
Parameters
----------
tabletitle : string
Title of the table as it should appear in a LaTeX document.
Writes
------
Returns
-------
stattable : string
The LaTeX commands for the statsummary table.
'''
stattable = r"""
\begin{table}[h!]
\caption{%s}
\centering
\begin{tabular}{l l l l l}
\toprule
\textbf{Statistic} & \textbf{Inlet} & \textbf{Outlet} \\""" % tabletitle
stats = [
{'name': 'Count', 'attribute': 'N', 'rule': 'top', 'forceint': True},
{'name': 'Number of NDs', 'attribute': 'ND', 'forceint': True},
{'name': 'Min; Max', 'attribute': ['min', 'max'], 'twoval': True},
{'name': 'Mean', 'attribute': 'mean', },
{
'name': '(95\% confidence interval)',
'attribute': 'mean_conf_interval',
'twoval': True, 'ci': True, 'rule': 'none'
},
{'name': 'Standard Deviation', 'attribute': 'std', },
{'name': 'Log. Mean', 'attribute': 'logmean', },
{
'name': '(95\% confidence interval)',
'attribute': 'logmean_conf_interval',
'twoval': True, 'ci': True, 'rule': 'none'
},
{'name': 'Log. Standard Deviation', 'attribute': 'logstd', },
{'name': 'Geo. Mean', 'attribute': 'geomean', },
{
'name': '(95\% confidence interval)',
'attribute': 'geomean_conf_interval',
'twoval': True, 'ci': True, 'rule': 'none'
},
{'name': 'Coeff. of Variation', 'attribute': 'cov', },
{'name': 'Skewness', 'attribute': 'skew', },
{'name': 'Median', 'attribute': 'median', },
{
'name': '(95\% confidence interval)',
'attribute': 'median_conf_interval',
'twoval': True, 'ci': True, 'rule': 'none'
},
{
'name': 'Quartiles',
'attribute': ['pctl25', 'pctl75'],
'twoval': True,
},
{
'name': 'Number of Pairs', 'attribute': 'n_pairs',
'rule': 'top', 'fromdataset': True,
'sigfigs': 1, 'forceint': True
},
{
'name': 'Wilcoxon p-value', 'attribute': 'wilcoxon_p',
'fromdataset': True, 'pval': True, 'tex': True
},
{
'name': 'Mann-Whitney p-value', 'attribute': 'mannwhitney_p',
'fromdataset': True, 'pval': True, 'tex': True
},
]
for s in stats:
stattable += self._tex_table_row(**s)
stattable += r"""
\bottomrule
\end{tabular}
\end{table}"""
return stattable + '\n'
# doesn't need to be a class method yet
def _make_tex_figure(self, filename, caption, position='hb', clearpage=True):
'''
        Create the LaTeX to include a figure in a document
Parameters
----------
filename : string
Path to the image you want to include
caption : string
            Caption to appear below the figure
position : string (default = 'hb')
Valid LaTeX "float" placement preference
(h=here, b=bottom, t=top, !=priority)
clearpage : bool (default = True)
Toggles the LaTeX command "\clearpage" after the figure
Writes
------
None
Returns
-------
figurestring : string
The LaTeX string to include a figure in a document
'''
if clearpage:
clrpage = ' \\clearpage\n'
else:
clrpage = '\n'
figurestring = r"""
\begin{figure}[%s] %% FIGURE
\centering
\includegraphics[scale=1.00]{%s}
\caption{%s}
\end{figure}%s""" % (position, filename, caption, clrpage)
return figurestring
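    # For reference, ``_make_tex_figure('fig.pdf', 'My caption')`` above
    # returns a float of the form::
    #
    #     \begin{figure}[hb] % FIGURE
    #       \centering
    #       \includegraphics[scale=1.00]{fig.pdf}
    #       \caption{My caption}
    #     \end{figure} \clearpage
    #
    # ('fig.pdf' and 'My caption' are placeholder arguments.)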
def makeTexInput(self, tabletitle, subsection=True):
'''
Creates an input file for a dataset including a
summary table, stat plot, and scatter plot.
Parameters
----------
        tabletitle : string
            Title of the summary table as it should appear in the document
subsection : bool (default = True)
Toggles the data going in its own subsection in the document
Writes
------
A full LaTeX input file for inclusion in a final or draft template
Returns
-------
filename : string
Filename and path of the file that is written
'''
tablestring = ''
# if there's enough effluent data
if self.ds.effluent.include:
if subsection:
tablestring += r'\subsection{%s}' % (self.bmp,)
# caption for the stats plot
prob_caption = 'Box and Probability Plots of {} at {} BMPs'.format(
self.parameter.name,
self.bmp
)
# caption for the scatter plot
scatter_caption = 'Influent vs. Effluent Plots of {} at {} BMPs'.format(
self.parameter.name,
self.bmp
)
# warning about having a lot of non-detects
warning = '''
Warning: there is a very high percentage of non-detects in
this data set. The hypothesis test results and other
statistics reported in this table may not be valid.
'''
# make the table and write it to the output file
tablestring += self._make_tex_table(tabletitle)
# if less than 80% of the data is ND
if self.ds.effluent.ND / self.ds.effluent.N <= 0.8:
# make the stat plot string
statfig = self._make_tex_figure(
self.stat_fig_name, prob_caption, clearpage=False
)
# make the scatter plot string
scatterfig = self._make_tex_figure(
self.scatter_fig_name, scatter_caption, clearpage=True
)
# write the strings to the file
tablestring += statfig
tablestring += scatterfig
else:
                # if there are too many non-detects,
# issue the warning
tablestring += warning
return tablestring
class CategoricalSummary(object):
def __init__(self, datasets, paramgroup, basepath, figpath,
showprogress=False, applyfilters=False,
filtercount=5, filtercolumn='bmp'):
self._cache = resettable_cache()
self._applyfilters = applyfilters
self.filtercount = filtercount
self.filtercolumn = filtercolumn
self._raw_datasets = [ds for ds in filter(
lambda x: x.effluent.include,
datasets
)]
self.basepath = basepath
self.figpath = figpath
self.showprogress = showprogress
self.parameters = [ds.definition['parameter'] for ds in self.datasets]
self.bmps = [ds.definition['category'] for ds in self.datasets]
self.paramgroup = paramgroup
@cache_readonly
def datasets(self):
if self._applyfilters:
filtered_datasets = []
for ds in self._raw_datasets:
filterlocation(ds.effluent, count=self.filtercount,
column=self.filtercolumn)
filterlocation(ds.influent, count=self.filtercount,
column=self.filtercolumn)
ds.include = ds.effluent.include
if ds.include:
filtered_datasets.append(ds)
else:
filtered_datasets = self._raw_datasets
return filtered_datasets
def _make_input_file_IO(self, inputIO, regenfigs=True):
figoptions = dict(dpi=600, bbox_inches='tight', transparent=True)
if self.showprogress:
pbar = utils.ProgressBar(self.datasets)
old_param = 'pure garbage'
for n, ds in enumerate(self.datasets, 1):
dsum = DatasetSummary(ds, self.paramgroup, self.figpath)
new_param = dsum.parameter.name
tabletitle = 'Statistics for {} at {} BMPs'.format(
dsum.parameter.paramunit(), dsum.bmp
)
latex_input = ''
if old_param != new_param:
latex_input = '\\section{%s}\n' % dsum.parameter.name
latex_input += dsum.makeTexInput(tabletitle, subsection=True)
latex_input += '\\clearpage\n'
if regenfigs:
statfig = ds.statplot(
ylabel=dsum.parameter.paramunit(),
bacteria=(self.paramgroup == 'Bacteria'),
axtype='prob'
)
scatterfig = ds.scatterplot(
xlabel='Influent ' + dsum.parameter.paramunit(),
ylabel='Effluent ' + dsum.parameter.paramunit(),
one2one=True
)
statpath = os.path.join(self.basepath, dsum.stat_fig_name)
statfig.savefig(statpath, **figoptions)
scatterpath = os.path.join(self.basepath, dsum.scatter_fig_name)
scatterfig.savefig(scatterpath, **figoptions)
inputIO.write(latex_input)
pyplot.close('all')
old_param = new_param
if self.showprogress:
pbar.animate(n)
def _make_report_IO(self, templateIO, inputpath, reportIO, report_title):
inputname = os.path.basename(inputpath)
documentstring = templateIO.read().replace('__VARTITLE', report_title)
documentstring += '\n\\input{%s}\n\\end{document}\n' % (inputname,)
reportIO.write(documentstring)
def makeReport(self, templatepath, inputpath, reportpath, report_title,
regenfigs=True):
with open(inputpath, 'w') as inputIO:
self._make_input_file_IO(inputIO, regenfigs=regenfigs)
with open(templatepath, 'r') as templateIO:
with open(reportpath, 'w') as reportIO:
self._make_report_IO(
templateIO,
inputpath,
reportIO,
report_title
)
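# Build proxy ``Line2D`` artists (influent, effluent) so that the categorical
# boxplot panels below can share a single two-entry legend without digging
# handles out of each boxplot dictionary.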
def _proxy_inflow_outflow(dataset):
from matplotlib.lines import Line2D
infl_color = dataset.influent.color
infl = Line2D([], [], color=infl_color, linestyle='-', linewidth=1.75,
marker='o', markerfacecolor='white',
markeredgewidth=1.25, markeredgecolor=infl_color)
effl_color = dataset.effluent.color
effl = Line2D([], [], color=effl_color, linestyle='-', linewidth=1.75,
marker='s', markerfacecolor='white',
markeredgewidth=1.25, markeredgecolor=effl_color)
return infl, effl
def categorical_boxplots(dc, outpath='.'):
param = None
bmplabels = sorted(dc.tidy['category'].unique())
matplotlib.rc("lines", markeredgewidth=0.5)
# positions of the ticks
bmppositions = numpy.arange(1, len(bmplabels) + 1) * 2
pos_map = dict(zip(bmplabels, bmppositions))
paramunits = (
dc.tidy[['parameter', 'paramgroup', 'units']]
.drop_duplicates()
.to_dict(orient='records')
)
for pu in paramunits:
parameter = pu['parameter']
group = pu['paramgroup']
units = pu['units']
param = wqio.Parameter(name=parameter, units=units)
fig, ax = pyplot.subplots(figsize=(6.5, 4))
datasets = dc.selectDatasets('inflow', 'outflow', parameter=parameter)
infl_proxy = None
for n, ds in enumerate(datasets):
pos = pos_map[ds.definition['category']]
if ds is not None:
bp = ds.boxplot(ax=ax, yscale='log', width=0.45, bothTicks=False,
bacteria=group == 'Biological',
pos=pos, offset=0.25,
patch_artist=True)
if infl_proxy is None:
infl_proxy, effl_proxy = _proxy_inflow_outflow(ds)
ax.set_xticks(bmppositions)
ax.set_xticklabels([x.replace('/', '/\n') for x in bmplabels])
ax.set_ylabel(param.paramunit())
ax.set_xlabel('')
ax.yaxis.grid(True, which='major', color='0.5', linestyle='-')
ax.yaxis.grid(False, which='minor')
wqio.viz.rotateTickLabels(ax, 45, 'x')
ax.set_xlim(left=1, right=bmppositions.max() + 1)
if infl_proxy is not None:
ax.legend(
(infl_proxy, effl_proxy),
('Influent', 'Effluent'),
ncol=2,
frameon=False,
bbox_to_anchor=(1.0, 1.1)
)
fig.tight_layout()
seaborn.despine(fig)
fname = '{}_{}_boxplots.png'.format(group, parameter.replace(', ', ''))
fig.savefig(os.path.join(outpath, fname), dpi=600, bbox_inches='tight', transparent=False)
pyplot.close(fig)
def _get_fmt(paramgroup):
if paramgroup == 'Solids':
return lambda x: '{:.1f}'.format(x)
elif paramgroup == 'Biological':
return lambda x: wqio.utils.sigFigs(x, n=2)
else:
return lambda x: '{:.2f}'.format(x)
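# Assemble the per-category summary table: counts of distinct BMPs and of
# results at each station, the 25th/50th/75th percentiles, plus difference
# flags (median CI overlap, Mann-Whitney, Wilcoxon) collapsed into a single
# symbol column.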
def categorical_stats(dc, simple=False):
return (
dc.data
.loc[:, dc.groupcols + ['bmp_id']]
.drop_duplicates()
.groupby(dc.groupcols)
.size()
.unstack(level='station')
.fillna(0).astype(int)
.pipe(wqio.utils.add_column_level, 'BMPs', 'result')
.swaplevel(axis='columns')
.join(dc.count.fillna(0).astype(int))
.join(dc.percentile(25).round(2))
.join(dc.median.round(2))
.join(dc.percentile(75).round(2))
.pipe(wqio.utils.flatten_columns)
.assign(diff_medianci=~wqio.utils.checkIntervalOverlap(
dc.median['inflow'], dc.median['outflow'], axis=1, oneway=False)
)
.assign(diff_mannwhitney=(dc.mann_whitney['pvalue'] < 0.05).xs(('inflow', 'outflow'), level=['station_1', 'station_2']))
.assign(diff_wilcoxon=(dc.wilcoxon['pvalue'] < 0.05).xs(('inflow', 'outflow'), level=['station_1', 'station_2']))
.assign(diff_symbol=lambda df: wqio.utils.symbolize_bools(
df.loc[:, lambda df: df.columns.map(lambda c: c.startswith('diff'))],
true_symbol='◆', false_symbol='◇', other_symbol='✖', join_char=' '
))
.pipe(wqio.utils.expand_columns, sep='_', names=['result', 'value'])
.swaplevel(axis='columns')
)
| bsd-3-clause |
ChrisBird/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
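# Editor's illustration (not part of the original tool): this module is meant
# to be imported by other scripts, which queue triangles/sections and then
# call show().  A minimal demo when the file is run directly; the chosen
# indices are arbitrary examples.
if __name__ == '__main__':
    polygon(ico.triangles[0])   # highlight one icosahedron face
    sections(range(4, 8))       # the four sub-triangle sections of face 1
    show(subtriangles=True)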
| gpl-3.0 |
nikitasingh981/scikit-learn | examples/applications/plot_face_recognition.py | 44 | 5706 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset:
================== ============ ======= ========== =======
precision recall f1-score support
================== ============ ======= ========== =======
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
================== ============ ======= ========== =======
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
costypetrisor/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
starry99/catmap | catmap/analyze/analysis_base.py | 1 | 26787 | import catmap
from catmap import ReactionModelWrapper
from catmap.model import ReactionModel as RM
from copy import copy
from scipy.stats import norm
from matplotlib.ticker import MaxNLocator
import os
import math
plt = catmap.plt
pickle = catmap.pickle
np = catmap.np
spline = catmap.spline
mtransforms = catmap.mtransforms
griddata = catmap.griddata
basic_colors = [[0,0,0],[0,0,1],[0.1,1,0.1],[1,0,0],[0,1,1],[1,0.5,0],[1,0.9,0],
[1,0,1],[0,0.5,0.5],[0.5,0.25,0.15],[0.5,0.5,0.5]]
#black,blue,green,red,cyan,orange,yellow,magenta,turquoise,brown,gray
def get_colors(n_colors):
"Get n colors"
    if n_colors < len(basic_colors):
        return basic_colors[0:n_colors]
    else:
        longlist = basic_colors*n_colors
return longlist[0:n_colors]
def boltzmann_vector(energy_list,vector_list,temperature):
#create a vector which is a boltzmann average of the vector_list weighted
#with energies in the energy_list.
def boltzmann_avg(es,ns,T):
kB = 8.613e-5 #assuming energies are in eV and T is in K
es = [e-min(es) for e in es] #normalize to minimum energy
exp_sum = sum([np.exp(-e/(kB*T)) for e in es])
exp_weighted = [n*np.exp(-e/(kB*T))/exp_sum for n,e in zip(ns,es)]
Z = sum(exp_weighted)
return Z
vars = zip(*vector_list)
boltz_vec = [boltzmann_avg(energy_list,v,temperature) for v in vars]
return boltz_vec
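# Example usage sketch for boltzmann_vector (illustrative values): for two
# states 0.1 eV apart at 300 K the lower-energy state dominates the weights,
# so the averaged vector stays close to the first input vector.
#     energies = [0.0, 0.1]
#     vectors = [[1.0, 0.0], [0.0, 1.0]]
#     avg = boltzmann_vector(energies, vectors, 300)   # roughly [0.98, 0.02]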
class MapPlot:
def __init__(self):
defaults = dict(
resolution_enhancement = 1,
min = None,
max = None,
n_ticks = 8,
plot_function = None,
colorbar = True,
colormap = plt.cm.jet,
axis_label_decimals = 2,
log_scale = False,
descriptor_labels = ['X_descriptor','Y_descriptor'],
default_descriptor_pt_args = {'marker':'o'},
default_descriptor_label_args = {},
descriptor_pt_args = {},
descriptor_label_args = {},
plot_size = 4,
aspect = None,
subplots_adjust_kwargs = {'hspace':0.35,'wspace':0.35,
'bottom':0.15}
)
for key in defaults:
val = defaults[key]
if not hasattr(self,key):
setattr(self,key,val)
elif getattr(self,key) is None:
setattr(self,key,val)
def update_descriptor_args(self):
if getattr(self,'descriptor_dict',None):
if self.descriptor_pt_args == {}:
for pt in self.descriptor_dict:
self.descriptor_pt_args[pt] = copy(
self.default_descriptor_pt_args)
if self.descriptor_label_args == {}:
for pt in self.descriptor_dict:
self.descriptor_label_args[pt] = copy(
self.default_descriptor_label_args)
def plot_descriptor_pts(self,ax,idx=None):
if getattr(self,'descriptor_dict',None):
self.update_descriptor_args()
for key in self.descriptor_dict:
pt_kwargs = self.descriptor_pt_args.get(key,
self.default_descriptor_pt_args)
lab_kwargs = self.descriptor_label_args.get(key,
self.default_descriptor_label_args)
x,y = self.descriptor_dict[key]
if None not in [x,y]:
if pt_kwargs is not None:
ax.errorbar(x,y,**pt_kwargs)
if lab_kwargs is not None:
ax.annotate(key,[x,y],**lab_kwargs)
ax.set_xlim(self.descriptor_ranges[0])
ax.set_ylim(self.descriptor_ranges[1])
def plot_single(self, mapp, rxn_index, ax=None,
overlay_map = None, alpha_range=None,
**plot_args):
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
xy,rates = zip(*mapp)
dim = len(xy[0])
if dim == 1:
x = zip(*xy)
descriptor_ranges = [[min(x),max(x)]]
if not self.plot_function:
if self.log_scale == True:
self.plot_function = 'semilogx'
else:
self.plot_function = 'plot'
elif dim == 2:
x,y = zip(*xy)
descriptor_ranges = [[min(x),max(x)],[min(y),max(y)]]
if not self.plot_function:
self.plot_function = 'contourf'
if 'cmap' not in plot_args:
plot_args['cmap'] = self.colormap
eff_res =self.resolution*self.resolution_enhancement
if self.min:
minval = self.min
else:
minval = None
maparray = RM.map_to_array(mapp,descriptor_ranges,eff_res,
log_interpolate=self.log_scale,minval=minval)
if self.max is None:
self.max = maparray.T[rxn_index].max()
if self.min is None:
self.min = maparray.T[rxn_index].min()
if maparray.min() <= self.min:
plot_args['extend'] = 'min'
if maparray.max() >= self.max:
plot_args['extend'] = 'max'
if maparray.max() >= self.max and maparray.min() <= self.min:
plot_args['extend'] = 'both'
if 'extend' not in plot_args:
plot_args['extend'] = 'neither'
if self.log_scale and dim == 2:
maparray = np.log10(maparray)
min_val = np.log10(float(self.min))
max_val = np.log10(float(self.max))
if min_val < -200:
min_val = max(maparray.min(),-200)
elif max_val == np.inf:
max_val = min(maparray.max(),200)
else:
min_val = self.min
max_val = self.max
maparray = np.clip(maparray,min_val,max_val)
log_scale = self.log_scale
if overlay_map:
overlay_array = RM.map_to_array(overlay_map,
descriptor_ranges,eff_res)
if alpha_range:
alpha_min,alpha_max = alpha_range
else:
alpha_min = overlay_array.min()
alpha_max = overlay_array.max()
overlay_array = (overlay_array - overlay_array.min())
overlay_array = overlay_array/(alpha_max - alpha_min)
overlay_array = np.clip(overlay_array,0,1)
maparray = np.clip(maparray,min_val,max_val)
norm_array = (maparray - maparray.min())
norm_array = norm_array/(maparray.max()-maparray.min())
maparray = norm_array*overlay_array
maparray = (maparray - maparray.min())
maparray = maparray/(maparray.max()-maparray.min())
maparray = maparray*(max_val-min_val) + min_val
maparray=norm_array*overlay_array
norm_array = (maparray - maparray.min())
norm_array = norm_array/(maparray.max()-maparray.min())
maparray = norm_array*(max_val-min_val)+min_val
if dim == 1:
x_range = descriptor_ranges[0]
plot_in = [np.linspace(*x_range+[eff_res]),maparray[:,rxn_index]]
elif dim == 2:
x_range,y_range = descriptor_ranges
z = maparray[:,:,rxn_index]
if self.log_scale:
levels = range(int(min_val),int(max_val)+1)
if len(levels) < 3*self.n_ticks:
levels = np.linspace(
int(min_val),int(max_val),3*self.n_ticks)
else:
levels = np.linspace(min_val,max_val,min(eff_res,25))
plot_in = [np.linspace(*x_range+[eff_res]),
np.linspace(*y_range+[eff_res]),z,levels]
plot = getattr(ax,self.plot_function)(*plot_in,**plot_args)
pos = ax.get_position()
if self.aspect:
ax.set_aspect(self.aspect)
ax.apply_aspect()
if dim == 2:
if self.colorbar:
if log_scale: #take only integer tick labels
cbar_nums = range(int(min_val),int(max_val)+1)
mod = int(len(cbar_nums)/self.n_ticks)
cbar_nums = [n for i,n in enumerate(cbar_nums) if not i%mod]
cbar_nums = np.array(cbar_nums)
else:
cbar_nums = np.linspace(min_val,max_val,self.n_ticks)
formatstring = '%.'+str(self.axis_label_decimals)+'g'
cbar_labels = [formatstring % (s,) for s in cbar_nums]
cbar_labels = [lab.replace('e-0','e-').replace('e+0','e')
for lab in cbar_labels]
plot.set_clim(min_val,max_val)
fig = ax.get_figure()
axpos = list(ax.get_position().bounds)
xsize = axpos[2]*0.04
ysize = axpos[3]
xp = axpos[0]+axpos[2]+0.04*axpos[2]
yp = axpos[1]
cbar_box = [xp,yp,xsize,ysize]
cbar_ax = fig.add_axes(cbar_box)
cbar = fig.colorbar(mappable=plot,ticks=cbar_nums,
cax=cbar_ax,extend=plot_args['extend'])
cbar.ax.set_yticklabels(cbar_labels)
ax.set_xlim(descriptor_ranges[0])
ax.set_ylim(descriptor_ranges[1])
if 'title' in plot_args and plot_args['title']:
if 'title_size' not in plot_args:
n_pts = self.plot_size*72
font_size = min([n_pts/len(plot_args['title']),14])
else:
font_size = plot_args['title_size']
ax.set_title(plot_args['title'],size=font_size)
if self.descriptor_labels:
ax.set_xlabel(self.descriptor_labels[0])
ax.set_ylabel(self.descriptor_labels[1])
if self.n_xticks:
ax.xaxis.set_major_locator(MaxNLocator(self.n_xticks))
if self.n_yticks:
ax.yaxis.set_major_locator(MaxNLocator(self.n_yticks))
return ax
def plot_separate(self,mapp,ax_list=None,indices=None,
overlay_map = None,**plot_single_kwargs):
pts,rates = zip(*mapp)
if indices is None:
indices = range(0,len(rates[0]))
n_plots = len(indices)
if not ax_list:
x = int(np.sqrt(n_plots))
if x*x < n_plots:
y = x+1
else:
y = x
if x*y < n_plots:
x = x+1
if self.colorbar:
fig = plt.figure(
figsize=(y*self.plot_size*1.25,x*self.plot_size))
else:
fig = plt.figure(figsize=(y*self.plot_size,x*self.plot_size))
ax_list = []
for i in range(0,n_plots):
ax_list.append(fig.add_subplot(x,y,i+1))
else:
fig = ax_list[0].get_figure()
if fig:
fig.subplots_adjust(**self.subplots_adjust_kwargs)
else:
fig = plt.gcf()
fig.subplots_adjust(**self.subplots_adjust_kwargs)
plotnum = 0
old_dict = copy(self.__dict__)
if not self.min or not self.max:
for id,i in enumerate(indices):
pts, datas = zip(*mapp)
dat_min = 1e99
dat_max = -1e99
for col in zip(*datas):
if min(col) < dat_min:
dat_min = min(col)
if max(col) > dat_max:
dat_max = max(col)
if self.min is None:
self.min = dat_min
if self.max is None:
self.max = dat_max
for id,i in enumerate(indices):
kwargs = plot_single_kwargs
if self.map_plot_labels:
try:
kwargs['title'] = self.map_plot_labels[i]
except IndexError:
kwargs['title'] = ''
kwargs['overlay_map'] = overlay_map
self.__dict__.update(old_dict)
self.plot_single(mapp,i,ax=ax_list[plotnum],**kwargs)
self.plot_descriptor_pts(ax_list[plotnum],i)
plotnum+=1
return fig
def plot_weighted(self,mapp,ax=None,weighting='linear',
second_map=None,indices=None,**plot_args):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
if self.color_list is None:
color_list = get_colors(len(mapp[0][-1])+1)
color_list.pop(0) #remove black
else:
color_list = self.color_list
pts,datas = zip(*mapp)
if indices is None:
indices = range(0,len(datas[0]))
rgbs = []
datas = zip(*datas)
datas = [d for id,d in enumerate(datas) if id in indices]
datas = zip(*datas)
if second_map:
pts2,datas2 = zip(*second_map)
datas2 = zip(*datas2)
datas2 = [d for id,d in enumerate(datas2) if id in indices]
datas2 = zip(*datas2)
else:
datas2 = datas
for data,data2 in zip(datas,datas2):
if weighting=='linear':
rs,gs,bs = zip(*color_list)
r = 1 - sum(float((1-ri)*di) for ri,di in zip(rs,data))
g = 1 - sum(float((1-gi)*di) for gi,di in zip(gs,data))
b = 1 - sum(float((1-bi)*di) for bi,di in zip(bs,data))
eff_res = self.resolution*self.resolution_enhancement
rgbs.append([r,g,b])
elif weighting =='dual':
rs,gs,bs = zip(*color_list)
r = 1 - sum(float((1-ri)*di*d2i)
for ri,di,d2i in zip(rs,data,data2))
g = 1 - sum(float((1-gi)*di*d2i)
for gi,di,d2i in zip(gs,data,data2))
b = 1 - sum(float((1-bi)*di*d2i)
for bi,di,d2i in zip(bs,data,data2))
eff_res = 300
rgbs.append([r,g,b])
r,g,b = zip(*rgbs)
x,y = zip(*pts)
xi = np.linspace(min(x),max(x),eff_res)
yi = np.linspace(min(y),max(y),eff_res)
ri = griddata(x,y,r,xi,yi)
gi = griddata(x,y,g,xi,yi)
bi = griddata(x,y,b,xi,yi)
rgb_array = np.zeros((eff_res,eff_res,3))
for i in range(0,eff_res):
for j in range(0,eff_res):
rgb_array[i,j,0] = ri[i,j]
rgb_array[i,j,1] = gi[i,j]
rgb_array[i,j,2] = bi[i,j]
xminmax,yminmax = self.descriptor_ranges
xmin,xmax = xminmax
ymin,ymax = yminmax
ax.imshow(rgb_array,extent=[xmin,xmax,ymin,ymax],origin='lower')
self.plot_descriptor_pts(ax)
if self.n_xticks:
ax.xaxis.set_major_locator(MaxNLocator(self.n_xticks))
if self.n_yticks:
ax.yaxis.set_major_locator(MaxNLocator(self.n_yticks))
ax.set_xlabel(self.descriptor_labels[0])
ax.set_ylabel(self.descriptor_labels[1])
if self.aspect:
ax.set_aspect(self.aspect)
ax.apply_aspect()
return fig
def save(self,fig,save=True,default_name = 'map_plot.pdf'):
if save == True:
if not hasattr(self,'output_file'):
save = default_name
else:
save = self.output_file
if save:
fig.savefig(save)
class MechanismPlot:
def __init__(self,energies,barriers=[],labels=[]):
self.energies = energies
self.barriers = barriers
self.labels = labels
self.energy_line_args = {'color':'k','lw':2}
self.barrier_line_args = {'color':'k','lw':2}
self.label_args = {'color':'k','size':16,'rotation':45}
self.label_positions= None
self.initial_energy = 0
self.initial_stepnumber = 0
self.energy_mode ='relative' #absolute
self.energy_line_widths = 0.5
def draw(self,ax=None):
def attr_to_list(attrname,required_length=len(self.energies)):
try:
getattr(self,attrname)[0] #Ensure that it is a list
iter(getattr(self,attrname)) #Ensure that it is a list...
if len(getattr(self,attrname)) == required_length:
pass
else:
raise ValueError(attrname + ' list is of length '+ \
str(len(getattr(self,attrname)))+ \
', but needs to be of length ' + \
str(required_length))
return getattr(self,attrname)
except:
return [getattr(self,attrname)]*required_length
barrier_line_args = attr_to_list('barrier_line_args',
len(self.energies)-1)
energy_line_widths = attr_to_list('energy_line_widths')
energy_line_args = attr_to_list('energy_line_args')
label_args =attr_to_list('label_args')
label_positions=attr_to_list('label_positions')
#plot energy lines
energy_list = np.array(self.energies)
energy_list = (energy_list - energy_list[0])
energy_list = list(energy_list)
if self.energy_mode == 'relative':
cum_energy = [energy_list[0]]
for i,e in enumerate(energy_list[1:]):
last = cum_energy[i]+e
cum_energy.append(last)
energy_list = cum_energy
energy_list = np.array(energy_list) + self.initial_energy
energy_list = list(energy_list)
energy_lines = [
[[i+self.initial_stepnumber,i+width+self.initial_stepnumber],
[energy_list[i]]*2]
for i,width in enumerate(energy_line_widths)]
for i,line in enumerate(energy_lines):
ax.plot(*line,**energy_line_args[i])
#create barrier lines
barrier_lines = []
if not self.barriers: self.barriers = [0]*(len(self.energies)-1)
for i,barrier in enumerate(self.barriers):
xi = energy_lines[i][0][1]
xf = energy_lines[i+1][0][0]
yi = energy_lines[i][1][0]
yf = energy_lines[i+1][1][0]
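            # Place the transition state between xi and xf according to the
            # ratio of the square roots of the forward and reverse barriers,
            # then draw a quadratic spline through (initial, TS, final) points.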
if barrier == 0 or barrier <= yf-yi:
line = [[xi,xf],[yi,yf]]
else:
yts = yi+barrier
barrier_rev = barrier + (yi-yf)
ratio = np.sqrt(barrier)/(np.sqrt(barrier)+np.sqrt(barrier_rev))
xts = xi + ratio*(xf-xi)
xs = [xi,xts,xf]
ys = [yi,yts,yf]
f = spline(xs,ys,k=2)
newxs = np.linspace(xi,xf,20)
newys = f(newxs)
line = [newxs,newys]
barrier_lines.append(line)
#plot barrier lines
for i,line in enumerate(barrier_lines):
ax.plot(*line,**barrier_line_args[i])
#add labels
trans = ax.get_xaxis_transform()
for i,label in enumerate(self.labels):
xpos = sum(energy_lines[i][0])/len(energy_lines[i][0])
label_position = label_positions[i]
args = label_args[i]
if label_position in ['top','ymax']:
if 'ha' not in args:
args['ha'] = 'left'
if 'va' not in args:
args['va'] = 'bottom'
ypos = 1
args['transform'] = trans
ax.text(xpos,ypos,label,**args)
elif label_position in ['bot','bottom','ymin']:
ypos = -0.1
ax.xaxis.set_ticks([float(sum(line[0])/len(line[0]))
for line in energy_lines])
ax.set_xticklabels(self.labels)
for attr in args.keys():
try:
[getattr(t,'set_'+attr)(args[attr])
for t in ax.xaxis.get_ticklabels()]
except:
pass
else:
ypos = energy_lines[i][1][0]
if 'ha' not in args:
args['ha'] = 'left'
if 'va' not in args:
args['va'] = 'bottom'
ax.annotate(label,[xpos,ypos],**args)
class ScalingPlot:
def __init__(self,descriptor_names,descriptor_dict,surface_names,
parameter_dict,scaling_function,x_axis_function,
scaling_function_kwargs={},x_axis_function_kwargs={},
):
self.descriptor_names = descriptor_names
self.surface_names = surface_names
self.descriptor_dict = descriptor_dict
self.parameter_dict = parameter_dict
self.scaling_function = scaling_function
#function to project descriptors into energies.
#Should take descriptors as an argument and return a
#dictionary of {adsorbate:energy} pairs.
self.scaling_function_kwargs = scaling_function_kwargs
self.x_axis_function = x_axis_function
#function to project descriptors onto the x-axis.
#Should take descriptors as an argument and return a
#dictionary of {adsorbate:x_value} pairs.
self.x_axis_function_kwargs = x_axis_function_kwargs
self.axis_label_size = 16
self.surface_label_size = 16
self.title_size = 18
self.same_scale = True
self.show_titles = True
self.show_surface_labels = True
self.subplots_adjust_kwargs = {'wspace':0.4,'hspace':0.4}
self.x_label_dict = {}
self.y_label_dict = {}
self.surface_colors = []
self.scaling_line_args = {}
self.label_args = {}
self.line_args = {}
self.include_empty = True
self.include_error_histogram = True
def plot(self,ax_list=None,plot_size=4.0,save=None):
all_ads = self.adsorbate_names + self.transition_state_names
all_ads = [a for a in all_ads if a in self.parameter_dict.keys()]
if self.include_empty:
ads_names = all_ads
else:
ads_names = [n for n in all_ads if
(None in self.parameter_dict[n] or
sum(self.parameter_dict[n])>0.0)]
if not self.surface_colors:
self.surface_colors = get_colors(len(self.surface_names))
if not self.scaling_line_args:
self.scaling_line_args = [{'color':'k'}]*len(ads_names)
elif hasattr(self.scaling_line_args,'update'): #its a dictionary if so.
self.scaling_line_args = [self.scaling_line_args]*len(
self.adsorbate_names)
for d in self.descriptor_names:
if d in ads_names:
ads_names.remove(d)
if self.include_error_histogram:
extra = 1
else:
extra = 0
if not ax_list:
spx = round(np.sqrt(len(ads_names)+extra))
spy = round(np.sqrt(len(ads_names)+extra))
if spy*spx < len(ads_names):
spy+= 1
fig = plt.figure(figsize=(spy*plot_size,spx*plot_size))
ax_list = [fig.add_subplot(spx,spy,i+1)
for i in range(len(ads_names))]
else:
fig = None
all_xs, all_ys = zip(*[self.descriptor_dict[s]
for s in self.surface_names])
fig.subplots_adjust(**self.subplots_adjust_kwargs)
all_ys = []
maxyrange = 0
ymins = []
all_err = []
for i,ads in enumerate(ads_names):
actual_y_vals = self.parameter_dict[ads]
desc_vals = [self.descriptor_dict[s] for s in self.surface_names]
scaled_x_vals = [self.x_axis_function(
d,**self.x_axis_function_kwargs)[0][ads] for d in desc_vals]
label = self.x_axis_function(
desc_vals[0],**self.x_axis_function_kwargs)[-1][ads]
scaled_y_vals = [self.scaling_function(
d,**self.scaling_function_kwargs)[ads] for d in desc_vals]
diffs = [scaled-actual for scaled,actual
in zip(scaled_y_vals,actual_y_vals) if actual != None]
ax = ax_list[i]
m,b = plt.polyfit(scaled_x_vals,scaled_y_vals,1)
x_vals = np.array([round(min(scaled_x_vals),1)-0.1,
round(max(scaled_x_vals),1)+0.1])
ax.plot(x_vals,m*x_vals+b,**self.scaling_line_args[i])
err = [yi - (m*xi+b) for xi,yi in zip(scaled_x_vals,actual_y_vals) if yi != None]
all_err += err
ax.set_xlabel(label)
ax.set_ylabel('$E_{'+ads+'}$ [eV]')
num_y_vals = []
# for s,c in zip(self.surface_names,self.surface_colors):
# print s, c
for sf,col,x,y in zip(self.surface_names,
self.surface_colors,scaled_x_vals,actual_y_vals):
if y and y != None:
ax.plot(x,y,'o',color=col,markersize=10,mec=None)
if self.show_surface_labels:
ax.annotate(sf,[x,y],color=col,**self.label_args)
num_y_vals.append(y)
if self.show_titles:
ax.set_title('$'+ads+'$',size=self.title_size)
all_ys += num_y_vals
if not num_y_vals: num_y_vals = scaled_y_vals
dy = max(num_y_vals) - min(num_y_vals)
ymins.append([min(num_y_vals),max(num_y_vals)])
if dy > maxyrange:
maxyrange = dy
ax.set_xlim(x_vals)
y_range = [round(min(num_y_vals),1)-0.1,
round(max(num_y_vals),1)+0.1]
self.scaling_error = all_err
if self.same_scale == True:
for i,ax in enumerate(ax_list):
pad = maxyrange - (ymins[i][1]-ymins[i][0])
y_range = [round(ymins[i][0]-pad,1)-0.1,
round(ymins[i][1]+pad,1)+0.1]
ax.set_ylim(y_range)
if self.include_error_histogram:
err_ax = fig.add_subplot(spx,spy,len(ads_names)+1)
err_ax.hist(all_err,bins=15)
err_ax.set_xlabel('$E_{actual} - E_{scaled}$ [eV]')
err_ax.set_ylabel('Counts')
ax_list.append(err_ax)
for ax in ax_list:
if self.n_xticks:
ax.xaxis.set_major_locator(MaxNLocator(self.n_xticks))
if self.n_yticks:
ax.yaxis.set_major_locator(MaxNLocator(self.n_yticks))
if save is None:
save = self.model_name+'_scaling.pdf'
if save:
fig.savefig(save)
return fig
| gpl-3.0 |
badlands-model/BayesLands | bl_surflikl.py | 1 | 19750 | ##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the BayesLands surface processes modelling companion. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
#Main Contributor: Danial Azam  Email: [email protected]
"""
This script generates the likelihood surface of the free parameters (rain and erodibility).
"""
import os
import numpy as np
import random
import time
import math
import copy
import fnmatch
import shutil
import plotly
import collections
import plotly.plotly as py
import matplotlib as mpl
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import cmocean as cmo
import plotly.graph_objs as go
from copy import deepcopy
from pylab import rcParams
from PIL import Image
from io import StringIO
from cycler import cycler
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from scipy.spatial import cKDTree
from scipy import stats
from sklearn.preprocessing import normalize
from pyBadlands.model import Model as badlandsModel
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import multivariate_normal
from plotly.graph_objs import *
from plotly.offline.offline import _plot_html
plotly.offline.init_notebook_mode()
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
class BayesLands():
def __init__(self, muted, simtime, samples, real_elev , real_erdp, real_erdp_pts, erdp_coords, filename, xmlinput, erodlimits, rainlimits, mlimit, nlimit, marinelimit, aeriallimit, run_nb, likl_sed):
self.filename = filename
self.input = xmlinput
self.real_elev = real_elev
self.real_erdp = real_erdp
self.real_erdp_pts = real_erdp_pts
self.erdp_coords = erdp_coords
self.likl_sed = likl_sed
self.simtime = simtime
self.samples = samples
self.run_nb = run_nb
self.muted = muted
self.erodlimits = erodlimits
self.rainlimits = rainlimits
self.mlimit = mlimit
self.nlimit = nlimit
self.marinelimit = marinelimit
self.aeriallimit = aeriallimit
self.initial_erod = []
self.initial_rain = []
self.initial_m = []
self.initial_n = []
self.step_rain = (rainlimits[1]- rainlimits[0])*0.01
self.step_erod = (erodlimits[1] - erodlimits[0])*0.01
self.step_m = (mlimit[1] - mlimit[0])*0.01
self.step_n = (nlimit[1] - nlimit[0])*0.01
self.sim_interval = np.arange(0, self.simtime+1, self.simtime/4)
self.burn_in = 0.0
def blackBox(self, rain, erodibility, m , n, marinediff, aerialdiff):
"""
Main entry point for running badlands model with different forcing conditions.
The following forcing conditions can be used:
- different uniform rain (uniform meaning same precipitation value on the entire region)
- different uniform erodibility (uniform meaning same erodibility value on the entire region)
        Parameters
        ----------
        variable: rain
        Requested uniform precipitation value.
        variable: erodibility
        Requested uniform erodibility value.
        variable: m
        Stream power law exponent m.
        variable: n
        Stream power law exponent n.
        variable: marinediff
        Marine diffusion coefficient.
        variable: aerialdiff
        Aerial diffusion coefficient.
        Return
        ------
        The function returns ordered dictionaries, keyed by the times in self.sim_interval, containing:
        variable: elev_vec
        Elevation as a 2D numpy array (regularly spaced dataset with resolution equivalent to simulation one)
        variable: erdp_vec
        Cumulative erosion/deposition accumulation as a 2D numpy array (regularly spaced as well)
        variable: erdp_pts_vec
        Cumulative erosion/deposition at the requested erdp_coords locations.
"""
tstart = time.clock()
# Re-initialise badlands model
model = badlandsModel()
# Load the XmL input file
model.load_xml(str(self.run_nb), self.input, muted = self.muted)
# Adjust erodibility based on given parameter
model.input.SPLero = erodibility
model.flow.erodibility.fill(erodibility)
# Adjust precipitation values based on given parameter
model.force.rainVal[:] = rain
#Adjust m and n values
model.input.SPLm = m
model.input.SPLn = n
model.input.CDm = marinediff
model.input.CDa = aerialdiff
elev_vec = collections.OrderedDict()
erdp_vec = collections.OrderedDict()
erdp_pts_vec = collections.OrderedDict()
for x in range(len(self.sim_interval)):
self.simtime = self.sim_interval[x]
model.run_to_time(self.simtime, muted = self.muted)
elev, erdp = self.interpolateArray(model.FVmesh.node_coords[:, :2], model.elevation, model.cumdiff)
erdp_pts = np.zeros((self.erdp_coords.shape[0]))
for count, val in enumerate(self.erdp_coords):
erdp_pts[count] = erdp[val[0], val[1]]
elev_vec[self.simtime] = elev
erdp_vec[self.simtime] = erdp
erdp_pts_vec[self.simtime] = erdp_pts
# print 'Badlands black box model took (s):',time.clock()-tstart
return elev_vec, erdp_vec, erdp_pts_vec
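    # Illustrative call sketch (parameter values below are hypothetical):
    # blackBox takes the six free parameters in the order used by likelihoodFunc,
    #     elev_vec, erdp_vec, erdp_pts_vec = self.blackBox(1.5, 5.e-5, 0.5, 1.0, 2.e-2, 5.e-2)
    # and each returned OrderedDict is keyed by the times in self.sim_interval.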
def interpolateArray(self, coords=None, z=None, dz=None):
"""
Interpolate the irregular spaced dataset from badlands on a regular grid.
"""
x, y = np.hsplit(coords, 2)
dx = (x[1]-x[0])[0]
nx = int((x.max() - x.min())/dx+1)
ny = int((y.max() - y.min())/dx+1)
xi = np.linspace(x.min(), x.max(), nx)
yi = np.linspace(y.min(), y.max(), ny)
xi, yi = np.meshgrid(xi, yi)
xyi = np.dstack([xi.flatten(), yi.flatten()])[0]
XY = np.column_stack((x,y))
tree = cKDTree(XY)
distances, indices = tree.query(xyi, k=3)
if len(z[indices].shape) == 3:
z_vals = z[indices][:,:,0]
dz_vals = dz[indices][:,:,0]
else:
z_vals = z[indices]
dz_vals = dz[indices]
zi = np.average(z_vals,weights=(1./distances), axis=1)
dzi = np.average(dz_vals,weights=(1./distances), axis=1)
onIDs = np.where(distances[:,0] == 0)[0]
if len(onIDs) > 0:
zi[onIDs] = z[indices[onIDs,0]]
dzi[onIDs] = dz[indices[onIDs,0]]
zreg = np.reshape(zi,(ny,nx))
dzreg = np.reshape(dzi,(ny,nx))
return zreg,dzreg
def viewGrid(self, plot_name ,fname, Z, rain, erod, width = 1000, height = 1000, zmin = None, zmax = None, zData = None, title='Export Grid'):
"""
Use Plotly library to visualise the grid in 3D.
        Parameters
        ----------
        variable: plot_name
        Name used for the saved html plot.
        variable: fname
        Output folder in which the plot is written.
        variable: Z
        2D array of values (e.g. likelihood or squared-error surface) to plot.
        variable: rain
        Rain values spanning the x-axis.
        variable: erod
        Erodibility values spanning the y-axis.
        variable: width
        Figure width.
        variable: height
        Figure height.
        variable: zmin
        Minimal value on the z-axis.
        variable: zmax
        Maximal value on the z-axis.
        variable: zData
        Unused; the plotted data are taken from Z.
        variable: title
        Title of the graph.
"""
zData = Z
if zmin == None:
zmin = zData.min()
if zmax == None:
zmax = zData.max()
axislabelsize = 20
data = Data([ Surface( x=rain, y=erod, z=zData ) ])
layout = Layout(
autosize=True,
width=width,
height=height,
scene=Scene(
zaxis=ZAxis(title = 'L ', range=[zmin,zmax], autorange=False, nticks=5, gridcolor='rgb(255, 255, 255)',
gridwidth=2, zerolinecolor='rgb(255, 255, 255)', zerolinewidth=2, showticklabels = True, titlefont=dict(size=axislabelsize),
tickfont=dict(size=14 ),),
xaxis=XAxis(title = 'Rain ',nticks = 8, gridcolor='rgb(255, 255, 255)', gridwidth=2,zerolinecolor='rgb(255, 255, 255)',
zerolinewidth=2, showticklabels = True, titlefont=dict(size=axislabelsize), tickfont=dict(size=14 ),),
yaxis=YAxis(title = 'Erodibility ',nticks = 8, gridcolor='rgb(255, 255, 255)', gridwidth=2,zerolinecolor='rgb(255, 255, 255)',
zerolinewidth=2, showticklabels = True, titlefont=dict(size=axislabelsize), tickfont=dict(size=14 ),),
bgcolor="rgb(244, 244, 248)"
)
)
fig = Figure(data=data, layout=layout)
camera = dict(up=dict(x=0, y=0, z=1),
center=dict(x=0.0, y=0.0, z=0.0),
eye=dict(x=1.25, y=1.25, z=1.25)
)
fig['layout'].update(scene=dict(camera=camera))
graph = plotly.offline.plot(fig, auto_open=False, output_type='file', filename='%s/plots/elev_grid_%s.html' %(fname, plot_name), validate=False)
return
def plotFunctions(self, fname, pos_likl, pos_rain, pos_erod):
nb_bins=30
font = 9
width = 1
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
ax.set_title(' Likelihood', fontsize= font+2)#, y=1.02)
ax1 = fig.add_subplot(211, projection = '3d')
ax1.set_facecolor('#f2f2f3')
X = pos_rain
Y = pos_erod
R = X/Y
X, Y = np.meshgrid(X, Y)
Z = pos_likl
print 'X shape ', X.shape, 'Y shape ', Y.shape, 'Z shape ', Z.shape
surf = ax1.plot_surface(X,Y,Z, cmap = cm.coolwarm, linewidth= 0, antialiased = False)
ax1.set_zlim(Z.min(), Z.max())
ax1.zaxis.set_major_locator(LinearLocator(10))
ax1.zaxis.set_major_formatter(FormatStrFormatter('%.05f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig('%s/plot.png'% (fname), bbox_inches='tight', dpi=300, transparent=False)
plt.show()
def storeParams(self, naccept, pos_rain, pos_erod, pos_m, pos_n, tausq_elev, tausq_erdp_pts, pos_likl):
"""
"""
pos_likl = str(pos_likl)
pos_rain = str(pos_rain)
pos_erod = str(pos_erod)
pos_m = str(pos_m)
pos_n = str(pos_n)
tausq_elev = str(np.sqrt(tausq_elev))
tausq_erdp_pts = str(np.sqrt(tausq_erdp_pts))
if not os.path.isfile(('%s/exp_data.txt' % (self.filename))):
with file(('%s/exp_data.txt' % (self.filename)),'w') as outfile:
# outfile.write('\n# {0}\t'.format(naccept))
outfile.write(pos_rain)
outfile.write('\t')
outfile.write(pos_erod)
outfile.write('\t')
outfile.write(pos_m)
outfile.write('\t')
outfile.write(pos_n)
outfile.write('\t')
outfile.write(pos_likl)
outfile.write('\t')
outfile.write(tausq_elev)
outfile.write('\t')
outfile.write(tausq_erdp_pts)
outfile.write('\n')
else:
with file(('%s/exp_data.txt' % (self.filename)),'a') as outfile:
outfile.write(pos_rain)
outfile.write('\t')
outfile.write(pos_erod)
outfile.write('\t')
outfile.write(pos_m)
outfile.write('\t')
outfile.write(pos_n)
outfile.write('\t')
outfile.write(pos_likl)
outfile.write('\t')
outfile.write(tausq_elev)
outfile.write('\t')
outfile.write(tausq_erdp_pts)
outfile.write('\n')
def likelihoodFunc(self,input_vector, real_elev, real_erdp, real_erdp_pts):
"""
"""
pred_elev_vec, pred_erdp_vec, pred_erdp_pts_vec = self.blackBox(input_vector[0], input_vector[1], input_vector[2], input_vector[3], input_vector[4], input_vector[5])
tausq_elev = (np.sum(np.square(pred_elev_vec[self.simtime] - real_elev)))/real_elev.size
sq_error_elev = (np.sum(np.square(pred_elev_vec[self.simtime] - real_elev)))/real_elev.size
tausq_erdp_pts = np.zeros(self.sim_interval.size)
for i in range(self.sim_interval.size):
tausq_erdp_pts[i] = np.sum(np.square(pred_erdp_pts_vec[self.sim_interval[i]] - self.real_erdp_pts[i]))/real_erdp_pts.shape[1]
# print 'tausq_erdp_pts' , tausq_erdp_pts
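        # Element-wise Gaussian log-likelihood of the elevation grid, with the
        # variance tausq_elev taken as the mean squared elevation error above.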
likelihood_elev = -0.5 * np.log(2* math.pi * tausq_elev) - 0.5 * np.square(pred_elev_vec[self.simtime] - real_elev) / tausq_elev
likelihood_erdp_pts = 0
if self.likl_sed:
#likelihood_erdp = -0.5 * np.log(2* math.pi * tausq_erdp) - 0.5 * np.square(pred_erdp_vec[self.simtime] - real_erdp) / tausq_erdp
for i in range(1,self.sim_interval.size):
likelihood_erdp_pts += np.sum(-0.5 * np.log(2* math.pi * tausq_erdp_pts[i]) - 0.5 * np.square(pred_erdp_pts_vec[self.sim_interval[i]] - self.real_erdp_pts[i]) / tausq_erdp_pts[i])
likelihood = np.sum(likelihood_elev) + (likelihood_erdp_pts)
sq_error_erdp_pts = np.sum(np.square(pred_erdp_pts_vec[self.sim_interval[i]] - self.real_erdp_pts[i]))/real_erdp_pts.shape[1]
sq_error = sq_error_elev+ sq_error_erdp_pts
print 'Using sediment pts in the likelihood'
else:
likelihood = np.sum(likelihood_elev)
sq_error_erdp_pts = 0
sq_error = sq_error_elev + sq_error_erdp_pts
return likelihood, sq_error, sq_error_elev, sq_error_erdp_pts
def likelihoodSurface(self):
# Initializing variables
samples = self.samples
real_elev = self.real_elev
real_erdp = self.real_erdp
real_erdp_pts = self.real_erdp_pts
# Creating storage for data
pos_erod = np.zeros(samples)
pos_rain = np.zeros(samples)
pos_m = np.zeros(samples)
pos_n = np.zeros(samples)
pos_marinediff = np.zeros(samples)
pos_aerialdiff = np.zeros(samples)
# List of accepted samples
count_list = []
rain = np.linspace(self.rainlimits[0], self.rainlimits[1], num = int(math.sqrt(samples)), endpoint = False)
erod = np.linspace(self.erodlimits[0], self.erodlimits[1], num = int(math.sqrt(samples)), endpoint = False)
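        # The rain-erodibility grid uses sqrt(samples) points per axis, so
        # 'samples' is expected to be a perfect square.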
dimx = rain.shape[0]
dimy = erod.shape[0]
pos_likl = np.zeros((dimx, dimy))
pos_sq_error = np.zeros((dimx, dimy))
pos_tau_elev = np.zeros((dimx, dimy))
pos_tau_erdp_pts = np.zeros((dimx, dimy))
# print 'pos_likl', pos_likl.shape, 'pos_rain', pos_rain, 'pos_erod', pos_erod
# Storing RMSE, tau values and adding initial run to accepted list
start = time.time()
i = 0
for r in range(len(rain)):
for e in range(len(erod)):
print '\n'
print 'Rain : ', rain[r], ' Erod : ', erod[e]
print 'Simtime', self.simtime
# Updating rain parameter and checking limits
p_rain = rain[r]
# Updating edodibility parameter and checking limits
p_erod = erod[e]
p_m = np.random.normal(0.5, 0.05)
p_n = np.random.normal(1.0, 0.05)
p_marinediff = np.random.normal(np.mean(self.marinelimit), np.std(self.marinelimit)/2)
p_aerialdiff = np.random.normal(np.mean(self.aeriallimit), np.std(self.aeriallimit)/2)
# Creating storage for parameters to be passed to blackBox model
v_proposal = []
v_proposal.append(p_rain)
v_proposal.append(p_erod)
v_proposal.append(p_m)
v_proposal.append(p_n)
v_proposal.append(p_marinediff)
v_proposal.append(p_aerialdiff)
# Passing paramters to calculate likelihood and rmse with new tau
likelihood, sq_error, tau_elev, tau_erdp_pts = self.likelihoodFunc(v_proposal,real_elev, real_erdp, real_erdp_pts)
print 'sq_error : ', sq_error, 'tau_elev :', tau_elev, 'tau_erdp_pts: ',tau_erdp_pts
pos_erod[i] = p_erod
pos_rain[i] = p_rain
pos_m[i] = p_m
pos_n[i] = p_n
pos_marinediff[i] = p_marinediff
pos_aerialdiff[i] = p_aerialdiff
pos_likl[r,e] = likelihood
pos_sq_error[r,e] = sq_error
pos_tau_elev[r,e] = tau_elev
pos_tau_erdp_pts[r,e] = tau_erdp_pts
self.storeParams(i, pos_rain[i], pos_erod[i], pos_m[i], pos_n[i],tau_elev, tau_erdp_pts, pos_likl[r,e])
i += 1
# self.plotFunctions(self.filename, pos_likl, rain, erod)
self.viewGrid('Log_likelihood ',self.filename, pos_likl, rain, erod)
self.viewGrid('Sum Squared Error',self.filename, pos_sq_error, rain, erod)
end = time.time()
total_time = end - start
print 'counter', i, '\nTime elapsed:', total_time, '\npos_likl.shape', pos_likl.shape
return (pos_rain, pos_erod, pos_likl)
def main():
random.seed(time.time())
muted = True
run_nb = 0
directory = ""
likl_sed = False
erdp_coords_crater = np.array([[60,60],[52,67],[74,76],[62,45],[72,66],[85,73],[90,75],[44,86],[100,80],[88,69]])
erdp_coords_crater_fast = np.array([[60,60],[72,66],[85,73],[90,75],[44,86],[100,80],[88,69],[79,91],[96,77],[42,49]])
erdp_coords_etopo = np.array([[42,10],[39,8],[75,51],[59,13],[40,5],[6,20],[14,66],[4,40],[72,73],[46,64]])
erdp_coords_etopo_fast = np.array([[42,10],[39,8],[75,51],[59,13],[40,5],[6,20],[14,66],[4,40],[68,40],[72,44]])
choice = input("Please choose a Badlands example to run the likelihood surface generator on:\n 1) crater_fast\n 2) crater\n 3) etopo_fast\n 4) etopo\n")
samples = input("Please enter number of samples (Make sure it is a perfect square): \n")
if choice == 1:
directory = 'Examples/crater_fast'
xmlinput = '%s/crater.xml' %(directory)
simtime = 15000
rainlimits = [0.0, 3.0]
erodlimits = [3.e-5, 7.e-5]
mlimit = [0.4, 0.6]
nlimit = [0.9, 1.1]
marinelimit = [5.e-3,4.e-2]
aeriallimit = [3.e-2,7.e-2]
true_rain = 1.5
true_erod = 5.e-5
likl_sed = True
erdp_coords = erdp_coords_crater_fast
elif choice == 2:
directory = 'Examples/crater'
xmlinput = '%s/crater.xml' %(directory)
simtime = 50000
rainlimits = [0.0, 3.0]
erodlimits = [3.e-5, 7.e-5]
mlimit = [0.4, 0.6]
nlimit = [0.9, 1.1]
marinelimit = [5.e-3,4.e-2]
aeriallimit = [3.e-2,7.e-2]
true_rain = 1.5
true_erod = 5.e-5
likl_sed = True
erdp_coords = erdp_coords_crater
elif choice == 3:
directory = 'Examples/etopo_fast'
xmlinput = '%s/etopo.xml' %(directory)
simtime = 500000
rainlimits = [0.0, 3.0]
erodlimits = [3.e-6, 7.e-6]
mlimit = [0.4, 0.6]
nlimit = [0.9, 1.1]
marinelimit = [0.3,0.7]
aeriallimit = [0.6,1.0]
true_rain = 1.5
true_erod = 5.e-6
likl_sed = True
erdp_coords = erdp_coords_etopo_fast
elif choice == 4:
directory = 'Examples/etopo'
xmlinput = '%s/etopo.xml' %(directory)
simtime = 1000000
rainlimits = [0.0, 3.0]
erodlimits = [3.e-6, 7.e-6]
mlimit = [0.4, 0.6]
nlimit = [0.9, 1.1]
marinelimit = [0.3,0.7]
aeriallimit = [0.6,1.0]
true_rain = 1.5
true_erod = 5.e-6
likl_sed = True
erdp_coords = erdp_coords_etopo
elif choice == 5:
directory = 'Examples/mountain'
xmlinput = '%s/mountain.xml' %(directory)
simtime = 1000000
rainlimits = [0.0, 3.0]
erodlimits = [3.e-6, 7.e-6]
mlimit = [0.4, 0.6]
nlimit = [0.9, 1.1]
marinelimit = [0.3,0.7]
aeriallimit = [0.6,1.0]
true_rain = 1.5
true_erod = 5.e-6
likl_sed = True
erdp_coords = erdp_coords_etopo
else:
print('Invalid selection, please choose a problem from the list ')
final_elev = np.loadtxt('%s/data/final_elev.txt' %(directory))
final_erdp = np.loadtxt('%s/data/final_erdp.txt' %(directory))
final_erdp_pts = np.loadtxt('%s/data/final_erdp_pts.txt' %(directory))
while os.path.exists('%s/liklSurface_%s' % (directory,run_nb)):
run_nb+=1
if not os.path.exists('%s/liklSurface_%s' % (directory,run_nb)):
os.makedirs('%s/liklSurface_%s' % (directory,run_nb))
os.makedirs('%s/liklSurface_%s/plots' % (directory,run_nb))
os.makedirs('%s/liklSurface_%s/prediction_data' % (directory,run_nb))
filename = ('%s/liklSurface_%s' % (directory,run_nb))
with file(('%s/liklSurface_%s/description.txt' % (directory,run_nb)),'a') as outfile:
outfile.write('\n\tsamples: {0}'.format(samples))
outfile.write('\n\terod_limits: {0}'.format(erodlimits))
outfile.write('\n\train_limits: {0}'.format(rainlimits))
outfile.write('\n\terdp coords: {0}'.format(erdp_coords))
outfile.write('\n\tlikl_sed: {0}'.format(likl_sed))
outfile.write('\n\tfilename: {0}'.format(filename))
print '\nInput file shape', final_elev.shape, '\n'
run_nb_str = 'liklSurface_' + str(run_nb)
bLands = BayesLands(muted, simtime, samples, final_elev, final_erdp, final_erdp_pts, erdp_coords, filename, xmlinput, erodlimits, rainlimits, mlimit, nlimit, marinelimit, aeriallimit, run_nb_str, likl_sed)
[pos_rain, pos_erod, pos_likl] = bLands.likelihoodSurface()
print 'Results are stored in ', filename
print 'Finished producing Likelihood Surface'
if __name__ == "__main__": main() | gpl-3.0 |
simonsfoundation/CaImAn | use_cases/eLife_scripts/Figure_4-figure_supplement1.py | 2 | 5336 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script for exploring performance of CaImAn online as a function of:
i) min_SNR: minimum trace SNR for accepting candidate components
ii) thresh_CNN_noisy: minimum CNN threshold for accepting candidate components
iii) min_num_trial: number of candidate components per frame
The script loads the pre-computed results for combinations on a small grid
over these parameters and produces a Figure showing the best overall
performance as well as the best average performance for parameters better
suited to short or long datasets. The best performance for each dataset
is also plotted. See the companion paper for more details.
@author: epnevmatikakis
"""
import numpy as np
import os
import matplotlib.pyplot as plt
# %% list and sort files
base_folder = '/mnt/ceph/neuro/DataForPublications/DATA_PAPER_ELIFE/WEBSITE'
with np.load(os.path.join(base_folder, 'all_records_grid_online.npz'), allow_pickle=True) as ld:
records_online = ld['records']
records_online = [list(rec) for rec in records_online]
# %% extract values
datasets = [rec[0] for rec in records_online[:9]]
inds = [rec[1:4] for rec in records_online[::9]]
RC_arr = np.array([float(rec[4]) for rec in records_online]).reshape(len(inds), 9)
PR_arr = np.array([float(rec[5]) for rec in records_online]).reshape(len(inds), 9)
F1_arr = np.array([float(rec[6]) for rec in records_online]).reshape(len(inds), 9)
#%% bar plot
colors = ['r','b','g','m']
n_groups = len(datasets)
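# 'lengths' (number of frames per dataset, used only for the x-tick labels
# below) is assumed to be defined earlier in the full analysis script.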
datasets_names = [ds[:-1] + '\n' + str(ln) for (ds,ln) in zip(datasets, lengths)]
ind_i = [np.argmax(f) for f in F1_arr.T]
ind_mean = np.argmax(F1_arr.mean(1))
ind_small = np.argmax(F1_arr[:,[0,1,3,4,5]].mean(1))
ind_large = np.argmax(F1_arr[:,6:].mean(1))
#F1_mean = F1_arr[ind_max]
F1_small = F1_arr[ind_small]
F1_large = F1_arr[ind_large]
F1_mean = F1_arr[ind_mean]
F1_max = F1_arr.max(0)
PR_mean = PR_arr[ind_mean]
PR_small = PR_arr[ind_small]
PR_large = PR_arr[ind_large]
PR_max = np.array([PR_arr[ind_i[i],i] for i in range(len(datasets))])
RC_mean = RC_arr[ind_mean]
RC_small = RC_arr[ind_small]
RC_large = RC_arr[ind_large]
RC_max = np.array([RC_arr[ind_i[i],i] for i in range(len(datasets))])
# create plot
plt.subplots()
index = np.arange(n_groups)
bar_width = 0.18
opacity = 1
plt.subplot(3,1,1)
rects0 = plt.bar(index, F1_small, bar_width,
alpha=opacity,
color=colors[0],
label='low threshold')
rects1 = plt.bar(index + bar_width, F1_large, bar_width,
alpha=opacity,
color=colors[1],
label='high threshold')
rects2 = plt.bar(index + 2*bar_width, F1_mean, bar_width,
alpha=opacity,
color=colors[2],
label='mean')
rects3 = plt.bar(index + 3*bar_width, F1_max, bar_width,
alpha=opacity,
color=colors[3],
label='max')
plt.xlabel('Dataset')
plt.ylabel('$F_1$ Scores')
ax1 = plt.gca()
ax1.set_ylim([0.6,0.85])
plt.xticks(index + bar_width, datasets_names)
plt.legend()
plt.tight_layout()
plt.subplot(3,1,2)
rects0 = plt.bar(index, PR_small, bar_width,
alpha=opacity,
color=colors[0],
label='low threshold')
rects1 = plt.bar(index + bar_width, PR_large, bar_width,
alpha=opacity,
color=colors[1],
label='high threshold')
rects2 = plt.bar(index + 2*bar_width, PR_mean, bar_width,
alpha=opacity,
color=colors[2],
label='mean')
rects3 = plt.bar(index + 3*bar_width, PR_max, bar_width,
alpha=opacity,
color=colors[3],
label='max')
plt.xlabel('Dataset')
plt.ylabel('Precision')
ax2 = plt.gca()
ax2.set_ylim([0.55,0.95])
plt.xticks(index + bar_width, datasets_names)
plt.tight_layout()
plt.subplot(3,1,3)
rects0 = plt.bar(index, RC_small, bar_width,
alpha=opacity,
color=colors[0],
label='low threshold')
rects1 = plt.bar(index + bar_width, RC_large, bar_width,
alpha=opacity,
color=colors[1],
label='high threshold')
rects2 = plt.bar(index + 2*bar_width, RC_mean, bar_width,
alpha=opacity,
color=colors[2],
label='mean')
rects3 = plt.bar(index + 3*bar_width, RC_max, bar_width,
alpha=opacity,
color=colors[3],
label='max')
plt.xlabel('Dataset')
plt.ylabel('Recall')
ax2 = plt.gca()
ax2.set_ylim([0.425,0.9])
plt.xticks(index + bar_width, datasets_names)
plt.tight_layout()
plt.show()
plt.rcParams['pdf.fonttype'] = 42
font = {'family': 'Arial',
'weight': 'regular',
'size': 20}
plt.rc('font', **font)
#%% print the parameter combinations
print('Low threshold vals: (min_SNR, CNN_thr, num_comp)= ' + str(inds[ind_small]))
print('High threshold vals: (min_SNR, CNN_thr, num_comp)= ' + str(inds[ind_large]))
print('Best overall vals: (min_SNR, CNN_thr, num_comp)= ' + str(inds[ind_mean]))
for (dataset, ind_mx) in zip(datasets, ind_i):
print('Best value for dataset ' + str(dataset) + ' was obtained for parameters (min_SNR, CNN_thr, num_comp)= ' + str(inds[ind_mx]))
| gpl-2.0 |
DiracInstitute/kbmod | analysis/trajectory_utils.py | 2 | 4598 | import numpy as np
from lsst.sims.utils import CoordinateTransformations as ct
import matplotlib.pyplot as plt
def inclined_vec(a,i,t,theta_0=0.,omega0=2*np.pi):
omega = a**(-3/2.)*omega0
theta_0_rad = np.deg2rad(theta_0)
x = a*np.cos(omega*t + theta_0_rad)
y = a*np.cos(i)*np.sin(omega*t + theta_0_rad)
z = a*np.sin(i)*np.sin(omega*t + theta_0_rad)
return x,y,z
def earth_vec(t,omega0=2*np.pi):
x = np.cos(omega0*t)
y = np.sin(omega0*t)
z = 0
return x,y,z
def diff_vec(a,i,t,theta_0=0.,omega0=2*np.pi):
x_o, y_o, z_o = inclined_vec(a,i,t,theta_0=theta_0,omega0=omega0)
x_e, y_e, z_e = earth_vec(t,omega0=omega0)
x_new = x_o-x_e
y_new = y_o-y_e
z_new = z_o-z_e
return np.array((x_new, y_new, z_new))
def plot_trajectory(radius, incl, maxTime, dt, theta_0):
"""radius, in AU
incl, inclination in degrees
maxTime, max time of plot in years
dt, time step in years
theta_0, time 0 angle along orbit in relation to sun-Earth line.
Note: probably would be more useful to have angle from
opposition as viewed from Earth. Will make this change
in future update
"""
time_step = np.arange(0,maxTime,dt)
lon = []
lat = []
for time in time_step:
lon_now,lat_now = ct.sphericalFromCartesian(diff_vec(radius,np.deg2rad(incl),time,theta_0))
lon.append(ct.arcsecFromRadians(lon_now))
lat.append(ct.arcsecFromRadians(lat_now))
lon = np.array(lon)
lat = np.array(lat)
#fig = plt.figure(figsize=(12,12))
plt.scatter(lon, lat, c=time_step, lw=0)
plt.xlabel('Long (arcsec)')
plt.ylabel('Lat (arcsec)')
plt.title('Trajectory of object over %.2f years' % maxTime)
plt.colorbar()
def plot_ang_vel(radius, incl, maxTime, dt, theta_0):
"""radius, in AU
incl, inclination in degrees
maxTime, max time of plot in years
dt, time step in years
theta_0, time 0 angle along orbit in relation to sun-Earth line.
Note: probably would be more useful to have angle from
opposition as viewed from Earth. Will make this change
in future update
"""
time_step = np.arange(0,maxTime,dt)
lon = []
lat = []
for time in time_step:
lon_now,lat_now = ct.sphericalFromCartesian(diff_vec(radius,np.deg2rad(incl),time,theta_0))
lon.append(lon_now)
lat.append(lat_now)
lon = np.array(lon)
lat = np.array(lat)
ang_vel = []
for array_val in range(0, len(lon)-1):
ang_vel.append(ct.arcsecFromRadians(ct.haversine(lon[array_val], lat[array_val],
lon[array_val+1], lat[array_val+1]))/(dt*365*24))
#fig = plt.figure(figsize=(12,12))
plt.plot(time_step[:-1], ang_vel)
plt.ylabel('Arcsec/hr')
plt.xlabel('Time (yrs)')
plt.title('Max Angular Velocity = %.4e arcsec/hr.' % np.max(ang_vel))
def get_trajectory(radius, incl, dt):
"""
    Returns the longitude and latitude coordinates of the full orbit in ecliptic coordinates.
radius, in AU
incl, inclination in degrees
dt, time step in years
"""
time_step = np.arange(0,np.sqrt(radius**3.),dt)
lon = []
lat = []
for time in time_step:
lon_now,lat_now = ct.sphericalFromCartesian(diff_vec(radius,np.deg2rad(incl),time,0.))
lon.append(ct.arcsecFromRadians(lon_now))
lat.append(ct.arcsecFromRadians(lat_now))
lon = np.array(lon)
lat = np.array(lat)
return ct.degreesFromArcsec(lon), ct.degreesFromArcsec(lat)
def get_ang_vel(radius, incl, dt):
"""
radius, in AU
incl, inclination in degrees
dt, time step in years
"""
time_step = np.arange(0,np.sqrt(radius**3.),dt)
lon = []
lat = []
for time in time_step:
lon_now,lat_now = ct.sphericalFromCartesian(diff_vec(radius,np.deg2rad(incl),time,0.))
lon.append(lon_now)
lat.append(lat_now)
lon = np.array(lon)
lat = np.array(lat)
ang_vel = []
for array_val in range(0, len(lon)-1):
ang_vel.append(ct.arcsecFromRadians(ct.haversine(lon[array_val], lat[array_val],
lon[array_val+1], lat[array_val+1]))/(dt*365*24))
#fig = plt.figure(figsize=(12,12))
lat_diff = lat[1:] - lat[:-1]
lon_diff = lon[1:] - lon[:-1]
angle_travelled = np.arctan2(lat_diff, lon_diff)
allowed_range = np.where(np.abs(np.degrees(angle_travelled)) < 15.)
return np.array(ang_vel), np.degrees(angle_travelled)
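# Illustrative usage sketch (object parameters below are hypothetical): plot
# the sky trajectory and angular velocity of an object at 40 AU with a 5 deg
# inclination over one year, sampled every 0.01 yr.
if __name__ == '__main__':
    plt.figure(figsize=(6, 6))
    plot_trajectory(40., 5., 1.0, 0.01, 0.)
    plt.figure(figsize=(6, 6))
    plot_ang_vel(40., 5., 1.0, 0.01, 0.)
    plt.show()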
| bsd-2-clause |
fernand/scipy | scipy/signal/windows.py | 32 | 53971 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
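    As a quick numerical check of the definition above, the 5-point window
    ramps linearly from 0 up to 1 and back down:
    >>> np.allclose(signal.bartlett(5), [0, 0.5, 1, 0.5, 0])
    True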
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
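    The two limiting cases of `alpha` described above can be checked directly
    against `boxcar` and `hann` from the same module:
    >>> np.allclose(signal.tukey(51, alpha=0), signal.boxcar(51))
    True
    >>> np.allclose(signal.tukey(51, alpha=1), signal.hann(51))
    True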
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
if not sym and not odd:
w = w[:-1]
return w
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
    The Hamming window was named for R. W. Hamming, an associate of
    J. W. Tukey, and is described in Blackman and Tukey. It was recommended
    for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
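    As a quick numerical check of the definition above, the end points equal
    0.54 - 0.46 = 0.08 rather than zero:
    >>> np.allclose(signal.hamming(11)[[0, -1]], 0.08)
    True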
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
    The Kaiser window is a very good approximation to the Digital Prolate
    Spheroidal Sequence, or Slepian window, which is the window that
    maximizes the energy in the main lobe relative to the total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
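    As the table above suggests, ``beta = 0`` reduces the Kaiser window to a
    rectangular window:
    >>> np.allclose(signal.kaiser(8, beta=0), np.ones(8))
    True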
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
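    As a quick numerical check of the definition above, an odd-length window
    equals exactly 1 at its centre sample (where n = 0):
    >>> float(signal.gaussian(51, std=7)[25])
    1.0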
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
    power-of-two `M` values are the fastest to generate, and prime `M` values
    are the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
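# For example, 'kaiser', 'chebwin', 'gaussian', 'general_gaussian', 'slepian',
# 'exponential', 'tukey' and their aliases end up in _needs_param, while
# parameter-free windows such as 'hann' or 'boxcar' do not, so a bare
# get_window('kaiser', N) below raises a ValueError asking for a tuple.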
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
        If True (default), create a "periodic" window ready to use with
        `ifftshift` and to be multiplied by the result of an FFT (see also
        `fftfreq`).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
gaussian (needs std), general_gaussian (needs power, width),
    slepian (needs width), chebwin (needs attenuation),
exponential (needs decay scale), tukey (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
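    With the default ``fftbins=True`` the named window is returned in its
    periodic form, equivalent to calling the window function with
    ``sym=False``:
    >>> np.allclose(signal.get_window('hann', 8), signal.hann(8, sym=False))
    True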
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/clipboard/__init__.py | 14 | 3443 | """
Pyperclip
A cross-platform clipboard module for Python. (only handles plain text for now)
By Al Sweigart [email protected]
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
if not pyperclip.copy:
print("Copy functionality unavailable!")
On Windows, no additional modules are needed.
On Mac, the module uses pbcopy and pbpaste, which should come with the os.
On Linux, install xclip or xsel via package manager. For example, in Debian:
sudo apt-get install xclip
Otherwise on Linux, you will need the gtk or PyQt4 modules installed.
gtk and PyQt4 modules are not available for Python 3,
and this module does not work with PyGObject yet.
"""
__version__ = '1.5.27'
import platform
import os
import subprocess
from .clipboards import (init_osx_clipboard,
init_gtk_clipboard, init_qt_clipboard,
init_xclip_clipboard, init_xsel_clipboard,
init_klipper_clipboard, init_no_clipboard)
from .windows import init_windows_clipboard
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
HAS_DISPLAY = os.getenv("DISPLAY", False)
CHECK_CMD = "where" if platform.system() == "Windows" else "which"
def _executable_exists(name):
return subprocess.call([CHECK_CMD, name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def determine_clipboard():
# Determine the OS/platform and set
# the copy() and paste() functions accordingly.
if 'cygwin' in platform.system().lower():
# FIXME: pyperclip currently does not support Cygwin,
# see https://github.com/asweigart/pyperclip/issues/55
pass
elif os.name == 'nt' or platform.system() == 'Windows':
return init_windows_clipboard()
if os.name == 'mac' or platform.system() == 'Darwin':
return init_osx_clipboard()
if HAS_DISPLAY:
# Determine which command/module is installed, if any.
try:
# Check if gtk is installed
import gtk # noqa
except ImportError:
pass
else:
return init_gtk_clipboard()
try:
# Check if PyQt4 is installed
import PyQt4 # noqa
except ImportError:
pass
else:
return init_qt_clipboard()
if _executable_exists("xclip"):
return init_xclip_clipboard()
if _executable_exists("xsel"):
return init_xsel_clipboard()
if _executable_exists("klipper") and _executable_exists("qdbus"):
return init_klipper_clipboard()
return init_no_clipboard()
def set_clipboard(clipboard):
global copy, paste
clipboard_types = {'osx': init_osx_clipboard,
'gtk': init_gtk_clipboard,
'qt': init_qt_clipboard,
'xclip': init_xclip_clipboard,
'xsel': init_xsel_clipboard,
'klipper': init_klipper_clipboard,
'windows': init_windows_clipboard,
'no': init_no_clipboard}
copy, paste = clipboard_types[clipboard]()
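# Illustrative usage of set_clipboard (assumes the chosen backend's helper
# program, e.g. xclip, is actually installed on the system):
#
# import pandas.io.clipboard as clipboard
# clipboard.set_clipboard('xclip')  # force the xclip backend on Linux
# clipboard.copy('some text')
# assert clipboard.paste() == 'some text'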
copy, paste = determine_clipboard()
__all__ = ["copy", "paste"]
# pandas aliases
clipboard_get = paste
clipboard_set = copy
| agpl-3.0 |
vibhorag/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: that it does raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
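            # For example, a run that keeps all n_informative true features
            # plus one false positive has FDR = 1 / (n_informative + 1) for
            # that run; it is the mean of this quantity over 30 random states
            # that is compared against alpha below.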
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
jorik041/glances | glances/core/glances_main.py | 11 | 15502 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Glances main class."""
# Import system libs
import argparse
import os
import sys
import tempfile
# Import Glances libs
from glances.core.glances_config import Config
from glances.core.glances_globals import appname, is_linux, is_windows, psutil_version, version
from glances.core.glances_logging import logger
class GlancesMain(object):
"""Main class to manage Glances instance."""
# Default stats' refresh time is 3 seconds
refresh_time = 3
# Set the default cache lifetime to 1 second (only for server)
# !!! Todo: configuration from the command line
cached_time = 1
    # By default, Glances is run in standalone mode (no client/server)
client_tag = False
# Server TCP port number (default is 61209)
server_port = 61209
# Web Server TCP port number (default is 61208)
web_server_port = 61208
# Default username/password for client/server mode
username = "glances"
password = ""
    # Example of use
example_of_use = "\
Examples of use:\n\
\n\
Monitor local machine (standalone mode):\n\
$ glances\n\
\n\
Monitor local machine with the Web interface (Web UI):\n\
$ glances -w\n\
Glances web server started on http://0.0.0.0:61208/\n\
\n\
Monitor local machine and export stats to a CSV file (standalone mode):\n\
$ glances --export-csv\n\
\n\
Monitor local machine and export stats to an InfluxDB server with 5s refresh time (standalone mode):\n\
$ glances -t 5 --export-influxdb\n\
\n\
Start a Glances server (server mode):\n\
$ glances -s\n\
\n\
Connect Glances to a Glances server (client mode):\n\
$ glances -c <ip_server>\n\
\n\
Connect Glances to a Glances server and export stats to a StatsD server (client mode):\n\
$ glances -c <ip_server> --export-statsd\n\
\n\
Start the client browser (browser mode):\n\
$ glances --browser\n\
"
def __init__(self):
"""Manage the command line arguments."""
self.args = self.parse_args()
def init_args(self):
"""Init all the command line arguments."""
_version = "Glances v" + version + " with psutil v" + psutil_version
parser = argparse.ArgumentParser(
prog=appname,
conflict_handler='resolve',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=self.example_of_use)
parser.add_argument(
'-V', '--version', action='version', version=_version)
parser.add_argument('-d', '--debug', action='store_true', default=False,
dest='debug', help='enable debug mode')
parser.add_argument('-C', '--config', dest='conf_file',
help='path to the configuration file')
# Enable or disable option on startup
parser.add_argument('--disable-network', action='store_true', default=False,
dest='disable_network', help='disable network module')
parser.add_argument('--disable-ip', action='store_true', default=False,
dest='disable_ip', help='disable IP module')
parser.add_argument('--disable-diskio', action='store_true', default=False,
dest='disable_diskio', help='disable disk I/O module')
parser.add_argument('--disable-fs', action='store_true', default=False,
dest='disable_fs', help='disable filesystem module')
parser.add_argument('--disable-sensors', action='store_true', default=False,
dest='disable_sensors', help='disable sensors module')
parser.add_argument('--disable-hddtemp', action='store_true', default=False,
dest='disable_hddtemp', help='disable HD temperature module')
parser.add_argument('--disable-raid', action='store_true', default=False,
dest='disable_raid', help='disable RAID module')
parser.add_argument('--disable-docker', action='store_true', default=False,
dest='disable_docker', help='disable Docker module')
parser.add_argument('--disable-left-sidebar', action='store_true',
default=False, dest='disable_left_sidebar',
help='disable network, disk I/O, FS and sensors modules (py3sensors needed)')
parser.add_argument('--disable-process', action='store_true', default=False,
dest='disable_process', help='disable process module')
parser.add_argument('--disable-log', action='store_true', default=False,
dest='disable_log', help='disable log module')
parser.add_argument('--disable-quicklook', action='store_true', default=False,
dest='disable_quicklook', help='disable quick look module')
parser.add_argument('--disable-bold', action='store_false', default=True,
dest='disable_bold', help='disable bold mode in the terminal')
parser.add_argument('--enable-process-extended', action='store_true', default=False,
dest='enable_process_extended', help='enable extended stats on top process')
parser.add_argument('--enable-history', action='store_true', default=False,
dest='enable_history', help='enable the history mode (matplotlib needed)')
parser.add_argument('--path-history', default=tempfile.gettempdir(),
dest='path_history', help='set the export path for graph history')
# Export modules feature
parser.add_argument('--export-csv', default=None,
dest='export_csv', help='export stats to a CSV file')
parser.add_argument('--export-influxdb', action='store_true', default=False,
dest='export_influxdb', help='export stats to an InfluxDB server (influxdb needed)')
parser.add_argument('--export-statsd', action='store_true', default=False,
dest='export_statsd', help='export stats to a StatsD server (statsd needed)')
parser.add_argument('--export-rabbitmq', action='store_true', default=False,
dest='export_rabbitmq', help='export stats to rabbitmq broker (pika needed)')
# Client/Server option
parser.add_argument('-c', '--client', dest='client',
help='connect to a Glances server by IPv4/IPv6 address or hostname')
parser.add_argument('-s', '--server', action='store_true', default=False,
dest='server', help='run Glances in server mode')
parser.add_argument('--browser', action='store_true', default=False,
dest='browser', help='start the client browser (list of servers)')
parser.add_argument('--disable-autodiscover', action='store_true', default=False,
dest='disable_autodiscover', help='disable autodiscover feature')
parser.add_argument('-p', '--port', default=None, type=int, dest='port',
help='define the client/server TCP port [default: {0}]'.format(self.server_port))
parser.add_argument('-B', '--bind', default='0.0.0.0', dest='bind_address',
help='bind server to the given IPv4/IPv6 address or hostname')
parser.add_argument('--password', action='store_true', default=False, dest='password_prompt',
help='define a client/server password')
parser.add_argument('--snmp-community', default='public', dest='snmp_community',
help='SNMP community')
parser.add_argument('--snmp-port', default=161, type=int,
dest='snmp_port', help='SNMP port')
parser.add_argument('--snmp-version', default='2c', dest='snmp_version',
help='SNMP version (1, 2c or 3)')
parser.add_argument('--snmp-user', default='private', dest='snmp_user',
help='SNMP username (only for SNMPv3)')
parser.add_argument('--snmp-auth', default='password', dest='snmp_auth',
help='SNMP authentication key (only for SNMPv3)')
parser.add_argument('--snmp-force', action='store_true', default=False,
dest='snmp_force', help='force SNMP mode')
parser.add_argument('-t', '--time', default=self.refresh_time, type=float,
dest='time', help='set refresh time in seconds [default: {0} sec]'.format(self.refresh_time))
parser.add_argument('-w', '--webserver', action='store_true', default=False,
dest='webserver', help='run Glances in web server mode (bottle needed)')
# Display options
parser.add_argument('-q', '--quiet', default=False, action='store_true',
dest='quiet', help='do not display the curses interface')
parser.add_argument('-f', '--process-filter', default=None, type=str,
dest='process_filter', help='set the process filter pattern (regular expression)')
parser.add_argument('--process-short-name', action='store_true', default=False,
dest='process_short_name', help='force short name for processes name')
if not is_windows:
parser.add_argument('--hide-kernel-threads', action='store_true', default=False,
dest='no_kernel_threads', help='hide kernel threads in process list')
if is_linux:
parser.add_argument('--tree', action='store_true', default=False,
dest='process_tree', help='display processes as a tree')
parser.add_argument('-b', '--byte', action='store_true', default=False,
dest='byte', help='display network rate in byte per second')
parser.add_argument('-1', '--percpu', action='store_true', default=False,
dest='percpu', help='start Glances in per CPU mode')
parser.add_argument('--fs-free-space', action='store_false', default=False,
dest='fs_free_space', help='display FS free space instead of used')
parser.add_argument('--theme-white', action='store_true', default=False,
dest='theme_white', help='optimize display colors for white background')
return parser
def parse_args(self):
"""Parse command line arguments."""
args = self.init_args().parse_args()
# Load the configuration file, if it exists
self.config = Config(args.conf_file)
# Debug mode
if args.debug:
from logging import DEBUG
logger.setLevel(DEBUG)
# Client/server Port
if args.port is None:
if args.webserver:
args.port = self.web_server_port
else:
args.port = self.server_port
# Autodiscover
if args.disable_autodiscover:
logger.info("Auto discover mode is disabled")
        # In web server mode, default refresh time: 5 sec
if args.webserver:
args.time = 5
args.process_short_name = True
# Server or client login/password
args.username = self.username
if args.password_prompt:
# Interactive or file password
if args.server:
args.password = self.__get_password(
description='Define the password for the Glances server',
confirm=True)
elif args.client:
args.password = self.__get_password(
description='Enter the Glances server password',
clear=True)
else:
# Default is no password
args.password = self.password
# By default help is hidden
args.help_tag = False
# Display Rx and Tx, not the sum for the network
args.network_sum = False
args.network_cumul = False
# Control parameter and exit if it is not OK
self.args = args
# Filter is only available in standalone mode
if args.process_filter is not None and not self.is_standalone():
logger.critical("Process filter is only available in standalone mode")
sys.exit(2)
# Check graph output path
if args.enable_history and args.path_history is not None:
if not os.access(args.path_history, os.W_OK):
logger.critical("History output path {0} do not exist or is not writable".format(args.path_history))
sys.exit(2)
logger.debug("History output path is set to {0}".format(args.path_history))
# Disable HDDTemp if sensors are disabled
if args.disable_sensors:
args.disable_hddtemp = True
logger.debug("Sensors and HDDTemp are disabled")
return args
def __hash_password(self, plain_password):
"""Hash a plain password and return the hashed one."""
from glances.core.glances_password import GlancesPassword
password = GlancesPassword()
return password.hash_password(plain_password)
def __get_password(self, description='', confirm=False, clear=False):
"""Read a password from the command line.
- if confirm = True, with confirmation
- if clear = True, plain (clear password)
"""
from glances.core.glances_password import GlancesPassword
password = GlancesPassword()
return password.get_password(description, confirm, clear)
def is_standalone(self):
"""Return True if Glances is running in standalone mode."""
return not self.args.client and not self.args.browser and not self.args.server and not self.args.webserver
def is_client(self):
"""Return True if Glances is running in client mode."""
return (self.args.client or self.args.browser) and not self.args.server
def is_client_browser(self):
"""Return True if Glances is running in client browser mode."""
return self.args.browser and not self.args.server
def is_server(self):
"""Return True if Glances is running in server mode."""
return not self.args.client and self.args.server
def is_webserver(self):
"""Return True if Glances is running in Web server mode."""
return not self.args.client and self.args.webserver
def get_config(self):
"""Return configuration file object."""
return self.config
def get_args(self):
"""Return the arguments."""
return self.args
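# Illustrative sketch (not part of the original module): how a launcher script
# typically drives this class. GlancesMain() parses sys.argv in its constructor,
# so the example is guarded to avoid side effects on import.
if __name__ == '__main__':
    core = GlancesMain()
    config = core.get_config()
    args = core.get_args()
    if core.is_standalone():
        logger.info("Glances would be started in standalone mode here")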
| lgpl-3.0 |
Avsecz/concise | setup.py | 1 | 2175 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
# add back later
if sys.version_info[0] != 3:
# sys.exit("Only Python 3 is supported")
print("WARNING: Only Python 3 is supported")
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
"numpy",
"pandas",
"scipy",
"scikit-learn>=0.18",
"matplotlib",
# "tensorflow", # - not per-se required
# "glmnet",
"keras>=2.0.4",
'hyperopt',
'descartes',
'shapely'
]
test_requirements = [
"pytest",
]
setup(
name='concise',
version='0.6.4',
description="CONCISE (COnvolutional Neural for CIS-regulatory Elements)",
long_description=readme,
author="Žiga Avsec",
author_email='[email protected]',
url='https://github.com/gagneurlab/concise',
packages=find_packages(),
package_data={'concise.resources': ['attract_metadata.txt', 'attract_pwm.txt',
'encode_motifs.txt.gz',
'HOCOMOCOv10_pcms_HUMAN_mono.txt'],
'concise.resources.RNAplfold': ["H_RNAplfold", "I_RNAplfold", "M_RNAplfold", "E_RNAplfold"]},
include_package_data=True,
setup_requires=['numpy'],
install_requires=requirements,
# dependency_links=dependency_links,
license="MIT license",
zip_safe=False,
keywords=["computational biology", "bioinformatics", "genomics",
"deep learning", "tensorflow", ],
extras_require={
'tensorflow': ['tensorflow>=1.0'],
'tensorflow with gpu': ['tensorflow-gpu>=1.0']},
classifiers=[
# classifiers
# default
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
| mit |
adamrvfisher/TechnicalAnalysisLibrary | PriceRelativeMAStrategy.py | 1 | 5151 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 4 01:02:22 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Price Relative Moving Average Strategy
#Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Reconstructed (assumption): Asset3 is referenced below but never defined in the
#original script; the price relative of the two tickers is assumed here, matching
#the commented-out PriceRelative column near the end of the file
Asset3 = pd.DataFrame()
Asset3['Adj Close'] = Asset1['Adj Close'] / Asset2['Adj Close']
#Brute Force Optimization
iterations = range(0, 3000)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = rand.random()
d = rand.random()
if c + d > 1:
continue
e = rand.randint(3,20)
window = int(e)
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.1):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .003:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
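#Each Dataset column stores one random trial; its rows are:
# 0-3 = weights a, b, c, d | 4 = MA window | 5 = Sharpe
# 6 = Sharpe/MaxDD (the ranking metric used below) | 7 = dailyreturn/MaxDD | 8 = MaxDD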
z1 = Dataset.iloc[6]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[6]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[6]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
window = int((Dataset[kfloat][4]))
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset1['Position'] = (Dataset[kfloat][0])
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
Dataset[kfloat][2],Dataset[kfloat][0])
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = (Dataset[kfloat][1])
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
Dataset[kfloat][3],Dataset[kfloat][1])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass'] #* (-1)
#Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
#conversionfactor = Portfolio['PriceRelative'][-1]
print(max(drawdown2))
#pd.to_pickle(Portfolio, 'VXX:UVXY') | apache-2.0 |
paztronomer/kepler_tools | addCol.py | 1 | 3019 | '''Adds the flux error column to the treated light curves.
Also saves all tables (treated and untreated) as space-separated values.
Searches for pairs of tables and matches them.
'''
import numpy as np
import os
import sys
import glob
import matplotlib.pyplot as plt
def VerboseID(kic_int):
kic_str = []
kic_int = map(str,kic_int)
for i in range(0,len(kic_int)):
if len(kic_int[i]) == 5:
kic_str.append('kplr0000' + kic_int[i])
elif len(kic_int[i]) == 6:
kic_str.append('kplr000' + kic_int[i])
elif len(kic_int[i]) == 7:
kic_str.append('kplr00' + kic_int[i])
elif len(kic_int[i]) == 8:
kic_str.append('kplr0' + kic_int[i])
elif len(kic_int[i]) == 9:
kic_str.append('kplr' + kic_int[i])
else:
print '\n\tDummy function encountered some error'
exit(0)
return kic_str
def Pairs(path1,path2,kic_str):
pathList = [[],[]]
for i in range(0,len(kic_str)):
for fname1 in glob.glob(os.path.join(path1, 'treat_*')):
if kic_str[i] in fname1:
pathList[0].append(fname1)
for fname2 in glob.glob(os.path.join(path2, 'd13.kcp*')):
if kic_str[i] in fname2:
pathList[1].append(fname2)
return pathList
def NearestPos(arr1,value2):
return np.argmin(np.abs(arr1-value2))
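#Example (illustrative, not in the original script):
#NearestPos(np.array([0., 1., 2.]), 1.2) returns 1, the index of the closest value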
#function matches elements from both lists, and create updated data
def Match2(path_out,tabs2pair):
for j in range(0,len(tabs2pair[0][:])):
#treated cols: time|flux .dat
trt = np.loadtxt(tabs2pair[0][j],delimiter=' ')
aux_fn1 = tabs2pair[0][j][ tabs2pair[0][j].find('kplr'):tabs2pair[0][j].find('.dat') ]
#with errors cols: time|flux|flux_err .csv
werr = np.loadtxt(tabs2pair[1][j],delimiter=',')
aux_fn2 = tabs2pair[1][j][ tabs2pair[1][j].find('kplr'):tabs2pair[1][j].find('.csv') ]
print '\n\tworking on: {0}'.format(aux_fn1)
time,flux,flux_err = np.empty([0]),np.empty([0]),np.empty([0])
for p in xrange(0,trt.shape[0]):
time = np.append(time,[trt[p,0]],axis=0)
flux = np.append(flux,[trt[p,1]],axis=0)
flux_err = np.append(flux_err, [ werr[NearestPos( werr[:,0],trt[p,0] ),2] ] )
        '''After the rotation the array is fine, but columns must be stacked
        last-to-first so that they appear first-to-last in the output file.
        '''
out1 = path_out+'kcp_trt_'+aux_fn1+'.tsv'
nrot = 3
np.savetxt(out1,np.rot90(np.vstack([flux_err,flux,time]),nrot),delimiter=' ')
out2 = path_out+'kcp_raw_'+aux_fn2+'.tsv'
np.savetxt(out2,werr,delimiter=' ')
return True
if __name__=='__main__':
path_treat = 's01tos04_treat'
path_werr = 'kcp_lcs'
path_tables = 'LC2work/'
#generate list of paths, to match lists
list2 = Pairs(path_treat,path_werr,VerboseID(np.loadtxt('kcp.txt',dtype='int')))
#match tables
transf = Match2(path_tables,list2)
if transf:
print 'All worked fine'
else:
print '\n\tcalled from another script\n'
| mit |
ndingwall/scikit-learn | sklearn/decomposition/_dict_learning.py | 2 | 59904 | """ Dictionary learning.
"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import ceil
import numpy as np
from scipy import linalg
from joblib import Parallel, effective_n_jobs
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..utils.fixes import delayed
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _check_positive_coding(method, positive):
if positive and method in ["omp", "lars"]:
raise ValueError(
"Positive constraint not supported for '{}' "
"coding method.".format(method)
)
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0,
positive=False):
"""Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : ndarray of shape (n_components, n_components) or None
Precomputed Gram matrix, `dictionary * dictionary'`
gram can be `None` if method is 'threshold'.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary * X'`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
regularization : int or float, default=None
The regularization parameter. It corresponds to alpha when
algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
Otherwise it corresponds to `n_nonzero_coefs`.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
check_input : bool, default=True
If `False`, the input arrays `X` and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
    positive : bool, default=False
Whether to enforce a positivity constraint on the sparse code.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_components, n_features)
The sparse codes.
See Also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if dictionary.shape[1] != X.shape[1]:
raise ValueError("Dictionary and X have different numbers of features:"
"dictionary.shape: {} X.shape{}".format(
dictionary.shape, X.shape))
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
_check_positive_coding(algorithm, positive)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False,
positive=positive, max_iter=max_iter)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True,
positive=positive)
if init is not None:
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
if positive:
np.clip(new_code, 0, None, out=new_code)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
if new_code.ndim != 2:
return new_code.reshape(n_samples, n_components)
return new_code
# XXX : could be moved to the linear_model module
@_deprecate_positional_args
def sparse_encode(X, dictionary, *, gram=None, cov=None,
algorithm='lasso_lars', n_nonzero_coefs=None, alpha=None,
copy_cov=True, init=None, max_iter=1000, n_jobs=None,
check_input=True, verbose=0, positive=False):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : ndarray of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary' * X`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`n_nonzero_coefs=int(n_features / 10)`.
alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
check_input : bool, default=True
If `False`, the input arrays X and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive : bool, default=False
Whether to enforce positivity when finding the encoding.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse codes
See Also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if effective_n_jobs(n_jobs) == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive)
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
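# Illustrative sketch (not part of the original module): a typical call, assuming
# ``D`` is a dictionary with unit-norm rows of shape (n_components, n_features)
# and ``X`` has shape (n_samples, n_features)::
#
#     code = sparse_encode(X, D, algorithm='lasso_lars', alpha=0.1)
#     X_approx = np.dot(code, D)  # reconstruction, since X ~= code * dictionary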
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None, positive=False):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_features, n_samples)
Data matrix.
code : ndarray of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
    verbose : bool, default=False
Degree of output the procedure will print.
return_r2 : bool, default=False
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
Returns
-------
dictionary : ndarray of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_features = Y.shape[0]
random_state = check_random_state(random_state)
# Get BLAS functions
gemm, = linalg.get_blas_funcs(('gemm',), (dictionary, code, Y))
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
nrm2, = linalg.get_blas_funcs(('nrm2',), (dictionary,))
# Residuals, computed with BLAS for speed and efficiency
# R <- -1.0 * U * V^T + 1.0 * Y
# Outputs R as Fortran array for efficiency
R = gemm(-1.0, dictionary, code, 1.0, Y)
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :])
if positive:
np.clip(dictionary[:, k], 0, None, out=dictionary[:, k])
# Scale k'th atom
# (U_k * U_k) ** 0.5
atom_norm = nrm2(dictionary[:, k])
if atom_norm < 1e-10:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_features)
if positive:
np.clip(dictionary[:, k], 0, None, out=dictionary[:, k])
# Setting corresponding coefs to 0
code[k, :] = 0.0
# (U_k * U_k) ** 0.5
atom_norm = nrm2(dictionary[:, k])
dictionary[:, k] /= atom_norm
else:
dictionary[:, k] /= atom_norm
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R = nrm2(R) ** 2.0
return dictionary, R
return dictionary
@_deprecate_positional_args
def dict_learning(X, n_components, *, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=None, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False, positive_dict=False,
positive_code=False, method_max_iter=1000):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
n_components : int
Number of dictionary atoms to extract.
alpha : int
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
The method used:
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the sparse code for warm restart scenarios.
callback : callable, default=None
Callable that gets invoked every five iterations
verbose : bool, default=False
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : ndarray of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See Also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
_check_positive_coding(method, positive_code)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs, positive=positive_code,
max_iter=method_max_iter, verbose=verbose)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state,
positive=positive_dict)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
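# Illustrative sketch (not part of the original module): ``alpha`` is
# keyword-only in the signature above, so a typical call looks like::
#
#     code, dictionary, errors = dict_learning(X, n_components=15, alpha=1.0,
#                                              random_state=0)
#     # X is then approximated by np.dot(code, dictionary)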
@_deprecate_positional_args
def dict_learning_online(X, n_components=2, *, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True,
n_jobs=None, method='lars', iter_offset=0,
random_state=None, return_inner_stats=False,
inner_stats=None, return_n_iter=False,
positive_dict=False, positive_code=False,
method_max_iter=1000):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
n_components : int, default=2
Number of dictionary atoms to extract.
alpha : float, default=1
Sparsity controlling parameter.
n_iter : int, default=100
Number of mini-batch iterations to perform.
return_code : bool, default=True
Whether to also return the code U or just the dictionary `V`.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios.
callback : callable, default=None
callable that gets invoked every five iterations.
batch_size : int, default=3
The number of samples to take in each batch.
verbose : bool, default=False
To control the verbosity of the procedure.
shuffle : bool, default=True
Whether to shuffle the data before splitting it in batches.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default=0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
return_inner_stats : bool, default=False
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If `return_inner_stats` is `True`, `return_code` is
ignored.
inner_stats : tuple of (A, B) ndarrays, default=None
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
`A` `(n_components, n_components)` is the dictionary covariance matrix.
`B` `(n_features, n_components)` is the data approximation matrix.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform when solving the lasso problem.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components),
The sparse code (only returned if `return_code=True`).
dictionary : ndarray of shape (n_components, n_features),
The solutions to the dictionary learning problem.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See Also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
_check_positive_coding(method, positive_code)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
dictionary = np.require(dictionary, requirements='W')
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs,
check_input=False,
positive=positive_code,
max_iter=method_max_iter, verbose=verbose).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
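        # A <- beta * A + code * code.T and B <- beta * B + X.T * code.T are
        # running sufficient statistics; beta < 1 gradually down-weights older
        # mini-batches, so the dictionary update below fits these accumulators
        # instead of re-scanning all past data.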
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state,
positive=positive_dict)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False,
positive=positive_code, max_iter=method_max_iter,
verbose=verbose)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class _BaseSparseCoding(TransformerMixin):
"""Base class from SparseCoder and DictionaryLearning algorithms."""
def __init__(self, transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter):
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.transform_max_iter = transform_max_iter
self.split_sign = split_sign
self.n_jobs = n_jobs
self.positive_code = positive_code
def _transform(self, X, dictionary):
"""Private method allowing to accomodate both DictionaryLearning and
SparseCoder."""
X = self._validate_data(X, reset=False)
code = sparse_encode(
X, dictionary, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, positive=self.positive_code)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
def transform(self, X):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
return self._transform(X, self.components_)
class SparseCoder(_BaseSparseCoding, BaseEstimator):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution;
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
the estimated components are sparse;
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`lasso_lars`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The unchanged dictionary atoms.
.. deprecated:: 0.24
This attribute is deprecated in 0.24 and will be removed in 0.26.
Use `dictionary` instead.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import SparseCoder
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
... [[0, 1, 0],
... [-1, -1, 2],
... [1, 1, 1],
... [0, 1, 1],
... [0, 2, 1]],
... dtype=np.float64
... )
>>> coder = SparseCoder(
... dictionary=dictionary, transform_algorithm='lasso_lars',
... transform_alpha=1e-10,
... )
>>> coder.transform(X)
array([[ 0., 0., -1., 0., 0.],
[ 0., 1., 1., 0., 0.]])
See Also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
_required_parameters = ["dictionary"]
@_deprecate_positional_args
def __init__(self, dictionary, *, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=None, positive_code=False,
transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter
)
self.dictionary = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : Ignored
y : Ignored
Returns
-------
self : object
"""
return self
@deprecated("The attribute 'components_' is deprecated " # type: ignore
"in 0.24 and will be removed in 0.26. Use the "
"'dictionary' instead.")
@property
def components_(self):
return self.dictionary
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
return super()._transform(X, self.dictionary)
def _more_tags(self):
return {"requires_fit": False}
@property
def n_components_(self):
return self.dictionary.shape[0]
@property
def n_features_in_(self):
return self.dictionary.shape[1]
class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int, default=n_features
Number of dictionary elements to extract.
alpha : float, default=1.0
Sparsity controlling parameter.
max_iter : int, default=1000
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for numerical error.
fit_algorithm : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution.
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
if the estimated components are sparse.
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution.
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.0
n_jobs : int or None, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the code, for warm restart.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial values for the dictionary, for warm restart.
verbose : bool, default=False
To control the verbosity of the procedure.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
positive_dict : bool, default=False
        Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
        Dictionary atoms extracted from the data.
error_ : array
        Vector of errors at each iteration.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import DictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> dict_learner = DictionaryLearning(
... n_components=15, transform_algorithm='lasso_lars', random_state=42,
... )
>>> X_transformed = dict_learner.fit_transform(X)
We can check the level of sparsity of `X_transformed`:
>>> np.mean(X_transformed == 0)
0.88...
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
0.07...
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See Also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=None, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None, positive_code=False,
positive_dict=False, transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter
)
self.n_components = n_components
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Returns
-------
self : object
Returns the object itself.
"""
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, alpha=self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
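        # Note: with return_n_iter=True, dict_learning returns
        # (code, dictionary, errors, n_iter), so despite the variable names
        # V holds the sparse code and U the learned dictionary; only the
        # dictionary and the per-iteration errors are stored on the estimator.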
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int, default=None
Number of dictionary elements to extract.
alpha : float, default=1
Sparsity controlling parameter.
n_iter : int, default=1000
Total number of iterations to perform.
fit_algorithm : {'lars', 'cd'}, default='lars'
The algorithm used:
- `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`)
- `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
batch_size : int, default=3
Number of samples in each mini-batch.
shuffle : bool, default=True
Whether to shuffle the samples before forming batches.
    dict_init : ndarray of shape (n_components, n_features), default=None
        Initial value of the dictionary for warm restart scenarios.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution.
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
if the estimated components are sparse.
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution.
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
verbose : bool, default=False
To control the verbosity of the procedure.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Components extracted from the data.
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
`A` `(n_components, n_components)` is the dictionary covariance matrix.
`B` `(n_features, n_components)` is the data approximation matrix.
n_iter_ : int
Number of iterations run.
iter_offset_ : int
        The number of iterations on data batches that have been
        performed before.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
        number generator or by `np.random`.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import MiniBatchDictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42)
>>> dict_learner = MiniBatchDictionaryLearning(
... n_components=15, transform_algorithm='lasso_lars', random_state=42,
... )
>>> X_transformed = dict_learner.fit_transform(X)
We can check the level of sparsity of `X_transformed`:
>>> np.mean(X_transformed == 0)
0.87...
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
0.10...
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See Also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=None, batch_size=3, shuffle=True,
dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None,
positive_code=False, positive_dict=False,
transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs, transform_alpha,
split_sign, n_jobs, positive_code, transform_max_iter
)
self.n_components = n_components
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
self.random_state_ = random_state
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
iter_offset : int, default=None
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
X = self._validate_data(X, reset=(iter_offset == 0))
U, (A, B) = dict_learning_online(
X, self.n_components, alpha=self.alpha,
n_iter=1, method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + 1
return self
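# Illustrative sketch (not part of the library): online fitting with
# partial_fit, feeding the estimator one chunk of samples at a time. The
# array `X` below is a made-up example of shape (n_samples, n_features).
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(300, 20)
#     est = MiniBatchDictionaryLearning(n_components=10, random_state=0)
#     for chunk in np.array_split(X, 10):
#         est.partial_fit(chunk)
#     codes = est.transform(X)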
| bsd-3-clause |
barbagroup/PetIBM | examples/ibpm/cylinder2dRe3000_GPU/scripts/plotDragCoefficient.py | 6 | 2037 | """
Plots the instantaneous drag coefficient between 0 and 3 time-units of flow
simulation and compares with numerical results from
Koumoutsakos and Leonard (1995).
_References:_
* Koumoutsakos, P., & Leonard, A. (1995).
High-resolution simulations of the flow around an impulsively started
cylinder using vortex methods.
Journal of Fluid Mechanics, 296, 1-38.
"""
import os
import pathlib
import numpy
import collections
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
root_dir = os.environ.get('PETIBM_EXAMPLES')
if not root_dir:
root_dir = simu_dir.parents[1]
data = collections.OrderedDict({})
# Reads forces from file.
label = 'PetIBM'
filepath = data_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
t, fx = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, usecols=(0, 1))
data[label] = {'t': t, 'cd': 2 * fx}
data[label]['kwargs'] = {}
# Reads drag coefficient of Koumoutsakos and Leonard (1995) for Re=3000.
label = 'Koumoutsakos and Leonard (1995)'
filename = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe3000.dat'
filepath = root_dir / 'data' / filename
with open(filepath, 'r') as infile:
t, cd = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
data[label] = {'t': 0.5 * t, 'cd': cd}
data[label]['kwargs'] = {'linewidth': 0, 'marker': 'o',
'markerfacecolor': 'none', 'markeredgecolor': 'black'}
pyplot.rc('font', family='serif', size=16)
# Plots the instantaneous drag coefficients.
fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
ax.grid()
ax.set_xlabel('Non-dimensional time')
ax.set_ylabel('Drag coefficient')
for label, subdata in data.items():
ax.plot(subdata['t'], subdata['cd'], label=label, **subdata['kwargs'])
ax.axis((0.0, 3.0, 0.0, 2.0))
ax.legend()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'dragCoefficient.png'
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
jefemagril/fermipy | fermipy/sed_plotting.py | 1 | 11967 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for plotting SEDs and Castro plots
Many parts of this code are taken from dsphs/like/lnlfn.py by
Matthew Wood <[email protected]>
Alex Drlica-Wagner <[email protected]>
"""
from __future__ import absolute_import, division, print_function
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
import numpy as np
NORM_LABEL = {
'NORM': r'Flux Normalization [a.u.]',
'flux': r'$F_{\rm min}^{\rm max} [ph $cm^{-2} s^{-1}$]',
'eflux': r'$E F_{\rm min}^{\rm max}$ [MeV $cm^{-2} s^{-1}]$',
'npred': r'$n_{\rm pred}$ [ph]',
'dfde': r'dN/dE [ph $cm^{-2} s^{-1} MeV^{-1}$]',
'edfde': r'E dN/dE [MeV $cm^{-2} s^{-1} MeV^{-1}$]',
    'e2dede': r'$E^2$ dN/dE [MeV $cm^{-2} s^{-1} MeV^{-1}$]',
'sigvj': r'$J\langle \sigma v \rangle$ [$GeV^{2} cm^{-2} s^{-1}$]',
'sigv': r'$\langle \sigma v \rangle$ [$cm^{3} s^{-1}$]',
}
def plotNLL_v_Flux(nll, fluxType, nstep=25, xlims=None):
""" Plot the (negative) log-likelihood as a function of normalization
nll : a LnLFN object
nstep : Number of steps to plot
    xlims : x-axis limits; if None, take them from the nll object
returns fig,ax, which are matplotlib figure and axes objects
"""
import matplotlib.pyplot as plt
if xlims is None:
xmin = nll.interp.xmin
xmax = nll.interp.xmax
else:
xmin = xlims[0]
xmax = xlims[1]
y1 = nll.interp(xmin)
y2 = nll.interp(xmax)
ymin = min(y1, y2, 0.0)
ymax = max(y1, y2, 0.5)
xvals = np.linspace(xmin, xmax, nstep)
yvals = nll.interp(xvals)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
ax.set_xlabel(NORM_LABEL[fluxType])
ax.set_ylabel(r'$-\Delta \log\mathcal{L}$')
ax.plot(xvals, yvals)
return fig, ax
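# Minimal usage sketch (assumes `nll` is a castro bin exposing an `interp`
# member, as in the __main__ block at the bottom of this file):
#
#     fig, ax = plotNLL_v_Flux(nll, 'flux', nstep=50)
#     fig.savefig('nll_vs_flux.png')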
def make_colorbar(fig, ax, im, zlims):
"""
"""
pdf_adjust = 0.01 # Dealing with some pdf crap...
cax = inset_axes(ax, width="3%", height="100%", loc=3,
bbox_to_anchor=(1.01, 0.0, 1.05, 1.00),
bbox_transform=ax.transAxes,
borderpad=0.)
cbar = fig.colorbar(im, cax, ticks=np.arange(zlims[0], zlims[-1]))
xy = cbar.outline.xy
xy[0:, 0] *= 1 - 5 * pdf_adjust
xy[0:, 1] *= 1 - pdf_adjust
cbar.outline.set_xy(xy)
cax.invert_yaxis()
cax.axis['right'].toggle(ticks=True, ticklabels=True, label=True)
cax.set_ylabel(r"$\Delta \log \mathcal{L}$")
return cax, cbar
def plotCastro_base(castroData, ylims,
xlabel, ylabel, nstep=25, zlims=None, global_min=False):
""" Make a color plot (castro plot) of the
log-likelihood as a function of
energy and flux normalization
castroData : A CastroData_Base object, with the
log-likelihood v. normalization for each energy bin
ylims : y-axis limits
xlabel : x-axis title
ylabel : y-axis title
nstep : Number of y-axis steps to plot for each energy bin
zlims : z-axis limits
global_min : Plot the log-likelihood w.r.t. the global min.
returns fig,ax,im,ztmp which are matplotlib figure, axes and image objects
"""
import matplotlib.pyplot as plt
ymin = ylims[0]
ymax = ylims[1]
if zlims is None:
zmin = -10
zmax = 0.
else:
zmin = zlims[0]
zmax = zlims[1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylim((ymin, ymax))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
normVals = np.logspace(np.log10(ymin), np.log10(ymax), nstep)
ztmp = []
for i in range(castroData.nx):
ztmp.append(castroData[i].interp(normVals))
ztmp = np.asarray(ztmp).T
ztmp *= -1.
ztmp = np.where(ztmp < zmin, np.nan, ztmp)
if global_min:
global_offset = castroData.nll_offsets.min()
offsets = global_offset - castroData.nll_offsets
ztmp += offsets
cmap = plt.get_cmap('jet_r')
xedge = castroData.x_edges()
ax.set_xlim((xedge[0], xedge[-1]))
im = ax.pcolormesh(xedge, normVals, ztmp,
vmin=zmin, vmax=zmax, cmap=cmap, linewidth=0)
#cax = ax
#cbar = plt.colorbar(im)
#cbar.set_label(r"$\Delta \log \mathcal{L}$")
cax, cbar = make_colorbar(fig, ax, im, (zmin, zmax))
#ax.set_ylim()
#plt.gca().set_yscale('log')
#plt.gca().set_xscale('log')
#plt.gca().set_xlim(sed['e_min'][0], sed['e_max'][-1])
#
#cax, cbar = make_colorbar(fig, ax, im, (zmin, zmax))
# cbar = fig.colorbar(im, ticks=np.arange(zmin,zmax),
# fraction=0.10,panchor=(1.05,0.5))
#cbar.set_label(r'$\Delta \log\mathcal{L}$')
#cax = None
return fig, ax, im, ztmp, cax, cbar
def plotCastro(castroData, ylims, nstep=25, zlims=None):
""" Make a color plot (castro plot) of the
delta log-likelihood as a function of
energy and flux normalization
castroData : A CastroData object, with the
log-likelihood v. normalization for each energy bin
ylims : y-axis limits
nstep : Number of y-axis steps to plot for each energy bin
zlims : z-axis limits
returns fig,ax,im,ztmp which are matplotlib figure, axes and image objects
"""
xlabel = "Energy [MeV]"
ylabel = NORM_LABEL[castroData.norm_type]
return plotCastro_base(castroData, ylims,
xlabel, ylabel, nstep, zlims)
def plotSED_OnAxis(ax, castroData, TS_thresh=4.0, errSigma=1.0,
colorLim='red', colorPoint='blue'):
"""
"""
ts_vals = castroData.ts_vals()
mles = castroData.mles()
has_point = ts_vals > TS_thresh
has_limit = ~has_point
ul_vals = castroData.getLimits(0.05)
err_lims_lo, err_lims_hi = castroData.getIntervals(0.32)
err_pos = err_lims_hi - mles
err_neg = mles - err_lims_lo
yerr_points = (err_neg[has_point], err_pos[has_point])
xerrs = (castroData.refSpec.eref - castroData.refSpec.ebins[0:-1],
castroData.refSpec.ebins[1:] - castroData.refSpec.eref)
yerr_limits = (0.5 * ul_vals[has_limit], np.zeros((has_limit.sum())))
ax.errorbar(castroData.refSpec.eref[has_point], mles[has_point],
yerr=yerr_points, fmt='o', color=colorPoint)
ax.errorbar(castroData.refSpec.eref[has_limit], ul_vals[has_limit],
yerr=yerr_limits, lw=1, color=colorLim,
ls='none', zorder=1, uplims=True)
ax.errorbar(castroData.refSpec.eref[has_limit], ul_vals[has_limit],
xerr=(xerrs[0][has_limit], xerrs[1][has_limit]),
lw=1.35, ls='none', color=colorLim, zorder=2, capsize=0)
def plotSED(castroData, ylims, TS_thresh=4.0, errSigma=1.0, specVals=[]):
""" Make a color plot (castro plot) of the (negative) log-likelihood
as a function of energy and flux normalization
castroData : A CastroData object, with the
log-likelihood v. normalization for each energy bin
ylims : y-axis limits
TS_thresh : TS value above with to plot a point,
rather than an upper limit
errSigma : Number of sigma to use for error bars
specVals : List of spectra to add to plot
returns fig,ax which are matplotlib figure and axes objects
"""
import matplotlib.pyplot as plt
xmin = castroData.refSpec.ebins[0]
xmax = castroData.refSpec.ebins[-1]
ymin = ylims[0]
ymax = ylims[1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
ax.set_xlabel("Energy [GeV]")
ax.set_ylabel(NORM_LABEL[castroData.norm_type])
plotSED_OnAxis(ax, castroData, TS_thresh, errSigma)
for spec in specVals:
ax.loglog(castroData.refSpec.eref, spec)
pass
return fig, ax
def compare_SED(castroData1, castroData2, ylims, TS_thresh=4.0,
errSigma=1.0, specVals=[]):
""" Compare two SEDs
castroData1: A CastroData object, with the
log-likelihood v. normalization for each energy bin
castroData2: A CastroData object, with the
log-likelihood v. normalization for each energy bin
ylims : y-axis limits
TS_thresh : TS value above with to plot a point,
rather than an upper limit
errSigma : Number of sigma to use for error bars
specVals : List of spectra to add to plot
returns fig,ax which are matplotlib figure and axes objects
"""
import matplotlib.pyplot as plt
xmin = min(castroData1.refSpec.ebins[0], castroData2.refSpec.ebins[0])
xmax = max(castroData1.refSpec.ebins[-1], castroData2.refSpec.ebins[-1])
ymin = ylims[0]
ymax = ylims[1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
ax.set_xlabel("Energy [GeV]")
ax.set_ylabel(NORM_LABEL[castroData1.norm_type])
plotSED_OnAxis(ax, castroData1, TS_thresh, errSigma,
colorLim='blue', colorPoint='blue')
plotSED_OnAxis(ax, castroData2, TS_thresh, errSigma,
colorLim='red', colorPoint='red')
for spec in specVals:
ax.loglog(castroData1.refSpec.eref, spec)
return fig, ax
if __name__ == "__main__":
from fermipy import castro
import sys
if len(sys.argv) == 1:
flux_type = "FLUX"
else:
flux_type = sys.argv[1]
if flux_type == 'NORM':
xlims = (0., 1.)
flux_lims = (1e-5, 1e-1)
elif flux_type == "FLUX":
xlims = (0., 1.)
flux_lims = (1e-13, 1e-9)
elif flux_type == "EFLUX":
xlims = (0., 1.)
flux_lims = (1e-8, 1e-4)
elif flux_type == "NPRED":
xlims = (0., 1.)
flux_lims = (1e-1, 1e3)
elif flux_type == "DFDE":
xlims = (0., 1.)
flux_lims = (1e-18, 1e-11)
elif flux_type == "EDFDE":
xlims = (0., 1.)
flux_lims = (1e-13, 1e-9)
else:
print(
"Didn't reconginize flux type %s, choose from NORM | FLUX | EFLUX | NPRED | DFDE | EDFDE" % sys.argv[1])
sys.exit()
tscube = castro.TSCube.create_from_fits("tscube_test.fits", flux_type)
resultDict = tscube.find_sources(10.0, 1.0, use_cumul=False,
output_peaks=True,
output_specInfo=True,
output_srcs=True)
peaks = resultDict["Peaks"]
max_ts = tscube.tsmap.counts.max()
(castro, test_dict) = tscube.test_spectra_of_peak(peaks[0])
nll = castro[2]
fig, ax = plotNLL_v_Flux(nll, flux_type)
fig2, ax2, im2, ztmp2 = plotCastro(castro, ylims=flux_lims, nstep=100)
spec_pl = test_dict["PowerLaw"]["Spectrum"]
spec_lp = test_dict["LogParabola"]["Spectrum"]
spec_pc = test_dict["PLExpCutoff"]["Spectrum"]
fig3, ax3 = plotSED(castro, ylims=flux_lims, TS_thresh=2.0,
specVals=[spec_pl])
result_pl = test_dict["PowerLaw"]["Result"]
result_lp = test_dict["LogParabola"]["Result"]
result_pc = test_dict["PLExpCutoff"]["Result"]
ts_pl = test_dict["PowerLaw"]["TS"]
ts_lp = test_dict["LogParabola"]["TS"]
ts_pc = test_dict["PLExpCutoff"]["TS"]
print("TS for PL index = 2: %.1f" % max_ts)
print("Cumulative TS: %.1f" % castro.ts_vals().sum())
print("TS for PL index free: %.1f (Index = %.2f)" %
(ts_pl, result_pl[1]))
print("TS for LogParabola: %.1f (Index = %.2f, Beta = %.2f)" %
(ts_lp, result_lp[1], result_lp[2]))
print("TS for PLExpCutoff: %.1f (Index = %.2f, E_c = %.2f)" %
(ts_pc, result_pc[1], result_pc[2]))
| bsd-3-clause |
kazarinov/hccf | sklearn_experiments.py | 1 | 7038 | # -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
from experiments.ctr_model import CTRModel
from hccf.utils.helpers import Timer
from sklearn.feature_extraction import FeatureHasher
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import log_loss
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
log = logging.getLogger(__name__)
FEATURES_CONFIG = {
'a': {
'count': 64,
'loc': 0.0,
'scale': 0.5,
'type': 'tree',
},
'b': {
'count': 50,
'loc': 0.0,
'scale': 0.5,
'type': 'tree',
},
'axb': {
'loc': 0.0,
'scale': 0.8,
'parts': ['a', 'b'],
}
}
def clean_data(filename):
preprocessor = Pipeline([
('fh', FeatureHasher(n_features=2 ** 13, input_type='string', non_negative=False)),
])
train_data = pd.read_table(filename, sep=',', chunksize=10000)
train_data = train_data.read()
y_train = train_data['click']
    train_data.drop(['click'], axis=1, inplace=True)  # remove the click column
x_train = np.asarray(train_data.astype(str))
y_train = np.asarray(y_train).ravel()
x_train = preprocessor.fit_transform(x_train).toarray()
return x_train, y_train
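# Note: with n_features=2 ** 13 the hasher produces 8192 columns and
# toarray() densifies them, so x_train has shape (n_rows, 8192) while
# y_train is a flat array of click labels.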
def clean_data_chunked(filename):
preprocessor = Pipeline([
('fh', FeatureHasher(n_features=2 ** 13, input_type='string', non_negative=False)),
])
train_data = pd.read_table(filename, sep=',', chunksize=1000)
for train_data_chunk in train_data:
print 'process chunk'
y_train = train_data_chunk['click']
        train_data_chunk.drop(['click'], axis=1, inplace=True)  # remove the click column
x_train = np.asarray(train_data_chunk.astype(str))
y_train = np.asarray(y_train).ravel()
x_train = preprocessor.fit_transform(x_train).toarray()
yield x_train, y_train
def create_dataset(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
train_filename = model + '.train.csv'
test_filename = model + '.test.csv'
if from_cache:
real_ctr_model = CTRModel.load(model + '.dat')
else:
with Timer('init real model'):
real_ctr_model = CTRModel(FEATURES_CONFIG, free_coef=-1, lam=100)
real_ctr_model.init()
with Timer('generate clicklog'):
real_ctr_model.generate_log(
filename=model,
format='csv',
train_length=train_dataset_length,
test_length=test_dataset_length,
)
real_ctr_model.save(model + '.dat')
with Timer('calculate likelihood'):
ll = real_ctr_model.loglikelihood()
ll0 = real_ctr_model.loglikelihood0()
likelihood_ratio = real_ctr_model.likelihood_ratio()
log.info('loglikelihood = %s', ll)
log.info('loglikelihood0 = %s', ll0)
log.info('likelihood_ratio = %s', likelihood_ratio)
return train_filename, test_filename
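# Illustrative call (a sketch; the file names follow the pattern used above):
#
#     train_csv, test_csv = create_dataset(
#         model='sklearn-clicklog',
#         from_cache=False,
#         train_dataset_length=100000,
#         test_dataset_length=100000,
#     )
#     # -> 'sklearn-clicklog.train.csv', 'sklearn-clicklog.test.csv'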
def ctr_gbdt(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
prediction_model = GradientBoostingClassifier(
loss='deviance',
learning_rate=0.1,
n_estimators=30,
subsample=1.0,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_depth=5,
)
x_train, y_train = clean_data(TRAIN_FILE)
x_test, y_test = clean_data(TEST_FILE)
with Timer('fit model'):
prediction_model.fit(x_train, y_train)
with Timer('evaluate model'):
y_prediction_train = prediction_model.predict_proba(x_train)
y_prediction_test = prediction_model.predict_proba(x_test)
loss_train = log_loss(y_train, y_prediction_train)
loss_test = log_loss(y_test, y_prediction_test)
print 'loss_train: %s' % loss_train
print 'loss_test: %s' % loss_test
def ctr_pca_sgd(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
prediction_model = SGDClassifier(
loss='log',
n_iter=200,
alpha=.0000001,
penalty='l2',
learning_rate='invscaling',
power_t=0.5,
eta0=4.0,
shuffle=True,
n_jobs=-1,
)
x_train, y_train = clean_data(TRAIN_FILE)
x_test, y_test = clean_data(TEST_FILE)
pca = PCA(n_components=100)
pca.fit(x_train)
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
with Timer('fit model'):
prediction_model.fit(x_train, y_train)
with Timer('evaluate model'):
y_prediction_train = prediction_model.predict_proba(x_train)
y_prediction_test = prediction_model.predict_proba(x_test)
loss_train = log_loss(y_train, y_prediction_train)
loss_test = log_loss(y_test, y_prediction_test)
print 'loss_train: %s' % loss_train
print 'loss_test: %s' % loss_test
def ctr_svm(model='sklearn-clicklog', from_cache=False, train_dataset_length=100000, test_dataset_length=100000):
"""
    Doesn't work: LinearSVC does not implement predict_proba, so the
    log-loss evaluation below raises an AttributeError.
"""
TRAIN_FILE, TEST_FILE = create_dataset(model, from_cache, train_dataset_length, test_dataset_length)
prediction_model = LinearSVC(
penalty='l1',
loss='squared_hinge',
dual=False,
tol=0.0001,
C=1.0,
multi_class='ovr',
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
verbose=1,
random_state=None,
max_iter=1000,
)
x_train, y_train = clean_data(TRAIN_FILE)
x_test, y_test = clean_data(TEST_FILE)
with Timer('fit model'):
prediction_model.fit(x_train, y_train)
with Timer('evaluate model'):
y_prediction_train = prediction_model.predict_proba(x_train)
y_prediction_test = prediction_model.predict_proba(x_test)
loss_train = log_loss(y_train, y_prediction_train)
loss_test = log_loss(y_test, y_prediction_test)
print 'loss_train: %s' % loss_train
print 'loss_test: %s' % loss_test
if __name__ == '__main__':
# ctr_gbdt(
# from_cache=False,
# train_dataset_length=100000,
# test_dataset_length=100000,
# )
# ctr_pca_sgd(
# from_cache=False,
# train_dataset_length=100000,
# test_dataset_length=100000,
# )
# ctr_svm(
# model='sklearn-clicklog',
# from_cache=False,
# train_dataset_length=100000,
# test_dataset_length=100000,
# )
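    # NOTE: ctr_ftrl is not defined in this module; the call below raises a
    # NameError unless that function is provided elsewhere.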
ctr_ftrl(
model='sklearn-clicklog',
from_cache=False,
train_dataset_length=100000,
test_dataset_length=100000,
)
| mit |
HackLinux/androguard | elsim/elsim/elsim.py | 37 | 16175 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime") # logs at runtime
log_interactive = logging.getLogger("elsim.interactive") # logs in interactive functions
log_loading = logging.getLogger("elsim.loading") # logs when loading
def set_debug():
log_elsim.setLevel( logging.DEBUG )
def get_debug():
return log_elsim.getEffectiveLevel() == logging.DEBUG
def warning(x):
log_runtime.warning(x)
def error(x):
log_runtime.error(x)
    raise Exception(x)
def debug(x):
log_runtime.debug(x)
from similarity.similarity import *
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH" # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH" # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE" # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH" # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors(object):
def __init__(self, x, ys):
import numpy as np
from sklearn.neighbors import NearestNeighbors
#print x, ys
CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
#print CI, x.get_info()
#print
for i in ys:
CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )
#idx = 0
#for i in np.array(CI)[1:]:
# print idx+1, i, ys[idx].get_info()
# idx += 1
self.neigh = NearestNeighbors(2, 0.4)
self.neigh.fit(np.array(CI))
#print self.neigh.kneighbors( CI[0], len(CI) )
self.CI = CI
self.ys = ys
def cmp_elements(self):
z = self.neigh.kneighbors( self.CI[0], 5 )
l = []
cmp_values = z[0][0]
cmp_elements = z[1][0]
idx = 1
for i in cmp_elements[1:]:
#if cmp_values[idx] > 1.0:
# break
#print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
l.append( self.ys[ i - 1 ] )
idx += 1
return l
def split_elements(el, els):
e1 = {}
for i in els:
e1[ i ] = el.get_associated_element( i )
return e1
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim(object):
def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so"):
self.e1 = e1
self.e2 = e2
self.F = F
self.compressor = SNAPPY_COMPRESS
set_debug()
if T != None:
self.F[ FILTER_SORT_VALUE ] = T
if isinstance(libnative, str):
libpath = libnative
libnative = True
self.sim = SIMILARITY( libpath, libnative )
if C != None:
if C in H_COMPRESSOR:
self.compressor = H_COMPRESSOR[ C ]
self.sim.set_compress_type( self.compressor )
else:
self.sim.set_compress_type( self.compressor )
self.filters = {}
self._init_filters()
self._init_index_elements()
self._init_similarity()
self._init_sort_elements()
self._init_new_elements()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ HASHSUM ] = {}
self.filters[ IDENTICAL_ELEMENTS ] = set()
self.filters[ SIMILAR_ELEMENTS ] = []
self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
self.filters[ NEW_ELEMENTS ] = set()
self.filters[ HASHSUM_NEW_ELEMENTS ] = []
self.filters[ DELETED_ELEMENTS ] = []
self.filters[ SKIPPED_ELEMENTS ] = []
self.filters[ ELEMENTS ][ self.e1 ] = []
self.filters[ HASHSUM ][ self.e1 ] = []
self.filters[ ELEMENTS ][ self.e2 ] = []
self.filters[ HASHSUM ][ self.e2 ] = []
self.filters[ SIMILARITY_ELEMENTS ] = {}
self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}
self.set_els = {}
self.ref_set_els = {}
self.ref_set_ident = {}
def _init_index_elements(self):
self.__init_index_elements( self.e1, 1 )
self.__init_index_elements( self.e2 )
def __init_index_elements(self, ce, init=0):
self.set_els[ ce ] = set()
self.ref_set_els[ ce ] = {}
self.ref_set_ident[ce] = {}
for ae in ce.get_elements():
e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )
if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ):
self.filters[ SKIPPED_ELEMENTS ].append( e )
continue
self.filters[ ELEMENTS ][ ce ].append( e )
fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
e.set_checksum( fm )
sha256 = e.getsha256()
self.filters[ HASHSUM ][ ce ].append( sha256 )
if sha256 not in self.set_els[ ce ]:
self.set_els[ ce ].add( sha256 )
self.ref_set_els[ ce ][ sha256 ] = e
self.ref_set_ident[ce][sha256] = []
self.ref_set_ident[ce][sha256].append(e)
def _init_similarity(self):
intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )
self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]
# Check if some elements in the first file has been modified
for j in self.filters[ELEMENTS][self.e1]:
self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
#debug("SIM FOR %s" % (j.get_info()))
if j.getsha256() not in self.filters[HASHSUM][self.e2]:
#eln = ElsimNeighbors( j, available_e2_elements )
#for k in eln.cmp_elements():
for k in available_e2_elements:
#debug("%s" % k.get_info())
self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS]:
self.filters[SIMILAR_ELEMENTS].append(j)
self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )
def _init_sort_elements(self):
deleted_elements = []
for j in self.filters[SIMILAR_ELEMENTS]:
#debug("SORT FOR %s" % (j.get_info()))
sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )
ret = True
if sort_h == []:
ret = False
if ret == False:
deleted_elements.append( j )
for j in deleted_elements:
self.filters[ DELETED_ELEMENTS ].append( j )
self.filters[ SIMILAR_ELEMENTS ].remove( j )
def __checksort(self, x, y):
return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]
def _init_new_elements(self):
# Check if some elements in the second file are totally new !
for j in self.filters[ELEMENTS][self.e2]:
# new elements can't be in similar elements
if j not in self.filters[SIMILAR_ELEMENTS]:
# new elements hashes can't be in first file
if j.getsha256() not in self.filters[HASHSUM][self.e1]:
ok = True
# new elements can't be compared to another one
for diff_element in self.filters[SIMILAR_ELEMENTS]:
if self.__checksort( diff_element, j ):
ok = False
break
if ok:
if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS]:
self.filters[NEW_ELEMENTS].add( j )
self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )
def get_similar_elements(self):
""" Return the similar elements
@rtype : a list of elements
"""
return self.get_elem( SIMILAR_ELEMENTS )
def get_new_elements(self):
""" Return the new elements
@rtype : a list of elements
"""
return self.get_elem( NEW_ELEMENTS )
def get_deleted_elements(self):
""" Return the deleted elements
@rtype : a list of elements
"""
return self.get_elem( DELETED_ELEMENTS )
def get_internal_identical_elements(self, ce):
""" Return the internal identical elements
@rtype : a list of elements
"""
return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )
def get_identical_elements(self):
""" Return the identical elements
@rtype : a list of elements
"""
return self.get_elem( IDENTICAL_ELEMENTS )
def get_skipped_elements(self):
return self.get_elem( SKIPPED_ELEMENTS )
def get_elem(self, attr):
return [ x for x in self.filters[attr] ]
def show_element(self, i, details=True):
print "\t", i.get_info()
if details:
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
for ident in self.ref_set_ident[self.e2][i.getsha256()]:
print "\t\t-->", ident.get_info()
else:
print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]
def get_element_info(self, i):
l = []
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
return l
def get_associated_element(self, i):
return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]
def get_similarity_value(self, new=True):
values = []
self.sim.set_compress_type( BZ2_COMPRESS )
for j in self.filters[SIMILAR_ELEMENTS]:
k = self.get_associated_element( j )
value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
# filter value
value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
values.append( value )
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
if new == True:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
else:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )
self.sim.set_compress_type( self.compressor )
similarity_value = 0.0
for i in values:
similarity_value += (1.0 - i)
if len(values) == 0:
return 0.0
return (similarity_value/len(values)) * 100
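        # Worked example of the formula above: with filtered values
        # [0.2, 0.0, 1.0] (0.0 = identical, 1.0 = completely different),
        # the result is ((0.8 + 1.0 + 0.0) / 3) * 100 = 60.0.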
def show(self):
print "Elements:"
print "\t IDENTICAL:\t", len(self.get_identical_elements())
print "\t SIMILAR: \t", len(self.get_similar_elements())
print "\t NEW:\t\t", len(self.get_new_elements())
print "\t DELETED:\t", len(self.get_deleted_elements())
print "\t SKIPPED:\t", len(self.get_skipped_elements())
#self.sim.show()
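# Usage sketch (hypothetical objects): `e1` and `e2` are element providers
# exposing get_elements(), and `F` is a filter dict with the FILTER_* keys
# defined above (element, checksum, similarity, sort, skip and value methods).
#
#     dstack = Elsim(e1, e2, F, T=0.4)
#     dstack.show()
#     print dstack.get_similarity_value()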
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff(object):
def __init__(self, elsim, F):
self.elsim = elsim
self.F = F
self._init_filters()
self._init_diff()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ ADDED_ELEMENTS ] = {}
self.filters[ DELETED_ELEMENTS ] = {}
self.filters[ LINK_ELEMENTS ] = {}
def _init_diff(self):
for i, j in self.elsim.get_elements():
self.filters[ ADDED_ELEMENTS ][ j ] = []
self.filters[ DELETED_ELEMENTS ][ i ] = []
x = self.filters[ BASE ][ DIFF ]( i, j )
self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )
self.filters[ LINK_ELEMENTS ][ j ] = i
#self.filters[ LINK_ELEMENTS ][ i ] = j
def show(self):
        for bb in self.filters[ LINK_ELEMENTS ]:
print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()
print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
for i in self.filters[ ADDED_ELEMENTS ][ bb ]:
print "\t",
i.show()
print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]:
print "\t",
i.show()
print
def get_added_elements(self):
return self.filters[ ADDED_ELEMENTS ]
def get_deleted_elements(self):
return self.filters[ DELETED_ELEMENTS ]
| apache-2.0 |
michaelbramwell/sms-tools | lectures/06-Harmonic-model/plots-code/oboe-spectrum.py | 24 | 1032 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 1024
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2)/float(fs), x1, lw=1.5)
plt.axis([-hM1/float(fs), hM2/float(fs), min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX.size)/float(N), mX, 'r', lw=1.5)
plt.axis([0,fs/3,-90,max(mX)])
plt.title ('mX')
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX.size)/float(N), pX, 'c', lw=1.5)
plt.axis([0,fs/3,min(pX),18])
plt.title ('pX')
plt.tight_layout()
plt.savefig('oboe-spectrum.png')
plt.show()
| agpl-3.0 |
samfpetersen/gnuradio | gr-digital/examples/example_timing.py | 49 | 9180 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
from scipy import fftpack
class example_timing(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise,
foffset, toffset, poffset, mode=0):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
gain = bw
nfilts = 32
rrc_taps_rx = filter.firdes.root_raised_cosine(
nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.off = filter.fractional_resampler_cc(0.20, 1.0)
if mode == 0:
self.clk = digital.pfb_clock_sync_ccf(sps, gain, rrc_taps_rx,
nfilts, nfilts//2, 1)
self.taps = self.clk.taps()
self.dtaps = self.clk.diff_taps()
self.delay = int(scipy.ceil(((len(rrc_taps)-1)/2 +
(len(self.taps[0])-1)/2)/float(sps))) + 1
self.vsnk_err = blocks.vector_sink_f()
self.vsnk_rat = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.connect((self.clk,2), self.vsnk_rat)
self.connect((self.clk,3), self.vsnk_phs)
else: # mode == 1
mu = 0.5
gain_mu = bw
gain_omega = 0.25*gain_mu*gain_mu
omega_rel_lim = 0.02
self.clk = digital.clock_recovery_mm_cc(sps, gain_omega,
mu, gain_mu,
omega_rel_lim)
self.vsnk_err = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_clk = blocks.vector_sink_c()
self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk)
self.connect(self.src, self.vsnk_src)
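        # Signal chain: random BPSK-like symbols -> RRC pulse shaping ->
        # channel model (noise, frequency and timing offsets) -> fractional
        # resampler (extra timing offset) -> clock recovery block, with
        # vector sinks tapping the source and the recovered symbols.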
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth (PFB) or gain (M&M) [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
parser.add_option("-M", "--mode", type="int", default=0,
help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_timing(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset,
options.mode)
put.run()
if options.mode == 0:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
data_rat = scipy.array(put.vsnk_rat.data()[20:])
data_phs = scipy.array(put.vsnk_phs.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "bo")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
delay = put.delay
m = len(data_clk.real)
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real[delay:], "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err, label="Error")
s3.plot(data_rat, 'r', label="Update rate")
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
s3.set_ylim([-0.5, 0.5])
s3.legend()
# Plot the clock recovery loop's error
s4 = f1.add_subplot(2,2,4)
s4.plot(data_phs)
s4.set_title("Clock Recovery Loop Filter Phase")
s4.set_xlabel("Samples")
s4.set_ylabel("Filter Phase")
diff_taps = put.dtaps
ntaps = len(diff_taps[0])
nfilts = len(diff_taps)
t = scipy.arange(0, ntaps*nfilts)
f3 = pylab.figure(3, figsize=(12,10), facecolor='w')
s31 = f3.add_subplot(2,1,1)
s32 = f3.add_subplot(2,1,2)
s31.set_title("Differential Filters")
s32.set_title("FFT of Differential Filters")
for i,d in enumerate(diff_taps):
D = 20.0*scipy.log10(1e-20+abs(fftpack.fftshift(fftpack.fft(d, 10000))))
s31.plot(t[i::nfilts].real, d, "-o")
s32.plot(D)
s32.set_ylim([-120, 10])
# If testing the M&M clock recovery loop
else:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "o")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real, "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err)
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
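# Example invocation (illustrative values only):
#
#     ./example_timing.py -N 2000 -S 4 -r 0.35 -W 0.0628 --noise 0.1 -M 0
#
# -M 0 exercises the polyphase filterbank clock sync; -M 1 the M&M loop.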
| gpl-3.0 |
jbudynk/sherpa | sherpa/astro/datastack/__init__.py | 1 | 30542 | """
Manipulate a stack of data in Sherpa.
The methods in the DataStack class provide a way to automatically apply
familiar Sherpa commands such as `set_par`, `freeze`, or `plot_fit`
to a stack of datasets. This simplifies simultaneous fitting of
multiple datasets.
:Copyright: Smithsonian Astrophysical Observatory (2014,2015)
:Author: Tom Aldcroft ([email protected])
:Author: Omar Laurino ([email protected])
"""
## Copyright (c) 2010,2014,2015 Smithsonian Astrophysical Observatory
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of the Smithsonian Astrophysical Observatory nor the
## names of its contributors may be used to endorse or promote products
## derived from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import types
import re
import ConfigParser
import numpy
import sherpa
import sherpa.astro.ui as ui
import logging
import stk
# Configure logging for the module
def _config_logger(name, level, stream):
logger = logging.getLogger(name)
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
logger.setLevel(level)
logger.propagate = False
fmt = logging.Formatter('Datastack: %(message)s', None)
hdlr = logging.StreamHandler(stream)
# hdlr.setLevel(level)
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
return logger
logger = _config_logger(__name__, level=logging.WARNING, stream=sys.stdout)
def set_stack_verbosity(level):
logger.setLevel(level)
def set_stack_verbose(verbose=True):
"""Configure whether stack functions print informational messages.
:param verbose: print messages if True (default=True)
:returns: None
"""
if verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
# Get plot package
_cp = ConfigParser.ConfigParser()
_cp.read(sherpa.get_config())
_plot_pkg = _cp.get('options', 'plot_pkg')
if _plot_pkg == 'pylab':
import matplotlib.pyplot as plt
elif _plot_pkg == 'chips':
import pychips
from sherpa.plot import chips_backend
else:
raise ValueError('Unknown plot package {0}'.format(_plot_pkg))
# Global list of dataset ids in use
_all_dataset_ids = {}
id_str = '__ID'
def set_template_id(newid):
global id_str
id_str = newid
def create_stack_model(model, id_):
model_comps = {}
def _get_new_model(model, level=0):
if hasattr(model, 'parts'):
# Recursively descend through model and create new parts (as needed)
# corresponding to the stacked model components.
newparts = []
for part in model.parts:
newparts.append(_get_new_model(part, level+1))
if hasattr(model, 'op'):
return model.op(*newparts)
elif isinstance(model, sherpa.astro.instrument.RSPModelPHA):
return sherpa.astro.instrument.RSPModelPHA(rmf=model.rmf, model=newparts[0],
arf=model.arf, pha=model.pha)
elif isinstance(model, sherpa.astro.instrument.RMFModelPHA):
    return sherpa.astro.instrument.RMFModelPHA(rmf=model.rmf, model=newparts[0],
                                               pha=model.pha)
elif isinstance(model, sherpa.astro.instrument.ARFModelPHA):
    return sherpa.astro.instrument.ARFModelPHA(arf=model.arf, model=newparts[0],
                                               pha=model.pha)
else:
raise ValueError("Unexpected composite model {0} (not operator, ARF or RMF)".format(repr(model)))
else:
if isinstance(model, sherpa.models.model.ArithmeticConstantModel):
return model.val
try:
model_type, model_name_ID = model.name.split('.')
except ValueError:
raise ValueError('Model name "{0}" must be in format <model_type>.<name>'.format(model.name))
model_name = re.sub(id_str, str(id_), model_name_ID)
if id_str in model_name_ID:
try:
model = getattr(getattr(sherpa.astro.ui, model_type), model_name)
except AttributeError:
# Must be a user model, so use add_model to put a modelwrapper function into namespace
sherpa.astro.ui.add_model(type(model))
model = eval('{0}.{1}'.format(model_type, model_name))
model_name_no_ID = re.sub(id_str, "", model_name_ID)
model_comps[model_name_no_ID] = dict(model=model,
model_name=model_name)
return model
return _get_new_model(model), model_comps
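# Illustrative expansion (names are examples only): for a source expression
# "xsphabs.gal * powlaw1d.pow__ID" and id_=2, the "pow__ID" component is
# re-created as "pow2" for that dataset, while "gal" (no __ID suffix) keeps a
# single shared instance; model_comps maps the stripped names ("pow", "gal")
# to the corresponding component and its resolved model_name.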
class DataStack(object):
"""
Manipulate a stack of data in Sherpa.
"""
def __init__(self):
self.getitem_ids = None
self.datasets = []
self.dataset_ids = {} # Access datasets by ID
def __getitem__(self, item):
"""Overload datastack getitem ds[item(s)] to set self.filter_ids to a tuple
corresponding to the specified items.
"""
try:
ids = (item + '',)
except TypeError:
try:
ids = tuple(item)
except TypeError:
ids = (item, )
self.getitem_ids = ids
return self
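# Illustrative filtering (ids are whatever was assigned at load time):
# ds[1, 3].freeze('pow.gamma') applies the call only to datasets 1 and 3,
# while ds.freeze('pow.gamma') applies it to every dataset in the stack.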
def __del__(self):
for dataid in self.dataset_ids:
try:
del _all_dataset_ids[dataid]
except:
pass
def clear_models(self):
"""Clear all model components in the stack.
:returns: None
"""
for dataset in self.datasets:
dataset['model_comps'] = {}
def clear_stack(self):
"""Clear all datasets in the stack.
:returns: None
"""
for dataid in self.dataset_ids:
del _all_dataset_ids[dataid]
self.__init__()
def show_stack(self):
"""Show the data id and file name (where meaningful) for selected
datasets in stack.
:returns: None
"""
for dataset in self.filter_datasets():
obsid = "N/A"
time = "N/A"
if hasattr(dataset['data'], 'header') and "OBS_ID" in dataset['data'].header.keys():
obsid = dataset['data'].header['OBS_ID']
if hasattr(dataset['data'], 'header') and "MJD_OBS" in dataset['data'].header.keys():
time = dataset['data'].header['MJD_OBS']
print('{0}: {1} {2}: {3} {4}: {5}'.format(dataset['id'], dataset['data'].name, 'OBS_ID', obsid, "MJD_OBS", time))
def get_stack_ids(self):
"""Get the ids for all datasets in stack
:returns: list of ids
"""
return self.ids
@property
def ids(self):
"""List of ids corresponding to stack datasets.
Returns
-------
ids : array of int or str
The data set identifiers.
"""
return [x['id'] for x in self.datasets]
def _get_dataid(self):
if self.getitem_ids:
dataid = self.getitem_ids[0]
self.getitem_ids = None
else:
dataid = 1
while dataid in _all_dataset_ids:
dataid += 1
if dataid in self.dataset_ids:
raise ValueError('Data ID = {0} is already in the DataStack'.format(dataid))
return dataid
def _add_dataset(self, dataid):
dataset = dict(id=dataid, args=[], model_comps={}, data=ui.get_data(dataid))
_all_dataset_ids[dataid] = dataset
self.dataset_ids[dataid] = dataset
self.datasets.append(dataset)
def _load_func(self, func, *args, **kwargs):
dataid = self._get_dataid()
logger.info('Loading dataset id %s' % dataid)
func(dataid, *args, **kwargs)
self._add_dataset(dataid)
def load_arrays(self, *args, **kwargs):
if len(args) == 0:
raise AttributeError("load_arrays takes at least one argument (got none).")
if not hasattr(args[0], '__iter__'):
id, args = args[0], args[1:]
else:
id = None
if id is not None:
if self is DATASTACK:
ui.load_arrays(id, *args, **kwargs)
return
else:
raise AttributeError("When called from a datastack instance, an ID cannot be provided to a load function ("+id+")")
# Array Stack.
for arrays in args[0]:
dataid = self._get_dataid()
logger.info('Loading dataset id %s' % dataid)
ui.load_arrays(dataid, *arrays)
self._add_dataset(dataid)
def load_pha(self, id, arg=None, use_errors=False):
if arg is None:
id, arg = arg, id
if id is not None:
if self is DATASTACK:
ui.load_pha(id, arg, use_errors)
return
else:
raise AttributeError("When called from a datastack instance, an ID cannot be provided to a load function ("+id+")")
# File Stacks. If the file argument is a stack file, expand the file and call this function for each file
# in the stack.
try:
files = stk.build(arg)
for file in files:
self._load_func(ui.load_pha, file, use_errors)
except:
self._load_func(ui.load_pha, arg, use_errors)
def _load_func_factory(load_func):
"""Override a native Sherpa data loading function."""
def _load(self, *args, **kwargs):
if len(args) == 1:
    id, arg = None, args[0]
    args = []
elif len(args) > 1:
    id, arg = args[0], args[1]
    args = args[2:]
if id is not None:
if self is DATASTACK:
self._load_func(load_func, id, arg, *args, **kwargs)
return
else:
raise AttributeError("When called from a datastack instance, an ID cannot be provided to a load function ("+id+")")
# File Stacks. If the file argument is a stack file, expand the file and call this function for each file
# in the stack.
try:
files = stk.build(arg)
for file in files:
self._load_func(load_func, file, *args, **kwargs)
except:
self._load_func(load_func, arg, *args, **kwargs)
# def _load(self, *args, **kwargs):
# """Load a dataset and add to the datasets for stacked analysis.
# """
# dataid = self._get_dataid()
#
# logger.info('Loading dataset id %s' % dataid)
# out = load_func(dataid, *args, **kwargs)
#
# #dataset = dict(id=dataid, args=args, model_comps={}, data=ui.get_data(dataid))
# #dataset.update(kwargs) # no sherpa load func 'args' keyword so no conflict
# self._add_dataset(dataid)
#
# return out
_load.__name__ = load_func.__name__
_load.__doc__ = load_func.__doc__
return _load
# load_arrays = _load_func_factory(ui.load_arrays)
load_ascii = _load_func_factory(ui.load_ascii)
load_data = _load_func_factory(ui.load_data)
# load_image = _load_func_factory(ui.load_image)
# load_pha = _load_func_factory(ui.load_pha)
load_bkg = _load_func_factory(ui.load_bkg)
def _set_model_factory(func):
def wrapfunc(self, model):
"""
Run a model-setting function for each of the datasets.
:rtype: None
"""
datasets = self.filter_datasets()
try:
# if model is passed as a string
model = eval(model, globals(), ui.__dict__)
except TypeError:
pass
except Exception, exc:
raise type(exc)('Error converting model "{0}" '
'to a sherpa model object: {1}'.format(model, exc))
for dataset in datasets:
id_ = dataset['id']
logger.info('Setting stack model using {0}() for id={1}'.format(
func.__name__, id_))
new_model, model_comps = create_stack_model(model, id_)
func(id_, new_model)
dataset['model_comps'].update(model_comps)
return None
wrapfunc.__name__ = func.__name__
wrapfunc.__doc__ = func.__doc__
return wrapfunc
set_source = _set_model_factory(ui.set_source)
set_model = _set_model_factory(ui.set_model)
set_bkg_model = _set_model_factory(ui.set_bkg_model)
set_full_model = _set_model_factory(ui.set_full_model)
set_bkg_full_model = _set_model_factory(ui.set_bkg_full_model)
def filter_datasets(self):
"""Return filtered list of datasets as specified in the __getitem__
argument (via self.getitem_ids which gets set in __getitem__).
"""
if self.getitem_ids is None:
return self.datasets
filter_ids = self.getitem_ids
self.getitem_ids = None
try:
return [self.dataset_ids[x] for x in filter_ids]
except KeyError:
raise ValueError('IDs = {0} not contained in dataset IDs = {1}'.format(filter_ids, self.ids))
def _sherpa_cmd_factory(func):
def wrapfunc(self, *args, **kwargs):
"""
Apply an arbitrary Sherpa function to each of the datasets.
:rtype: List of results
"""
datasets = self.filter_datasets()
logger.info('Running {0} with args={1} and kwargs={2} for ids={3}'.format(
func.__name__, args, kwargs, [x['id'] for x in datasets]))
return [func(x['id'], *args, **kwargs) for x in datasets]
wrapfunc.__name__ = func.__name__
wrapfunc.__doc__ = func.__doc__
return wrapfunc
subtract = _sherpa_cmd_factory(ui.subtract)
notice = _sherpa_cmd_factory(ui.notice_id)
ignore = _sherpa_cmd_factory(ui.ignore_id)
get_arf = _sherpa_cmd_factory(ui.get_arf)
get_rmf = _sherpa_cmd_factory(ui.get_rmf)
get_response = _sherpa_cmd_factory(ui.get_response)
get_bkg_arf = _sherpa_cmd_factory(ui.get_bkg_arf)
get_bkg_rmf = _sherpa_cmd_factory(ui.get_bkg_rmf)
get_bkg = _sherpa_cmd_factory(ui.get_bkg)
get_source = _sherpa_cmd_factory(ui.get_source)
get_model = _sherpa_cmd_factory(ui.get_model)
get_bkg_model = _sherpa_cmd_factory(ui.get_bkg_model)
try:
get_bkg_scale = _sherpa_cmd_factory(ui.get_bkg_scale)
except AttributeError:
pass # not available for CIAO < 4.3
group_adapt = _sherpa_cmd_factory(ui.group_adapt)
group_adapt_snr = _sherpa_cmd_factory(ui.group_adapt_snr)
group_bins = _sherpa_cmd_factory(ui.group_bins)
group_counts = _sherpa_cmd_factory(ui.group_counts)
group_snr = _sherpa_cmd_factory(ui.group_snr)
group_width = _sherpa_cmd_factory(ui.group_width)
load_arf = _sherpa_cmd_factory(ui.load_arf)
load_rmf = _sherpa_cmd_factory(ui.load_rmf)
load_bkg_arf = _sherpa_cmd_factory(ui.load_bkg_arf)
load_bkg_rmf = _sherpa_cmd_factory(ui.load_bkg_rmf)
load_filter = _sherpa_cmd_factory(ui.load_filter)
load_grouping = _sherpa_cmd_factory(ui.load_grouping)
def _sherpa_par(self, func, par, msg, *args, **kwargs):
"""Apply ``func(*args)`` to all model component or model component parameters named ``mcpar``.
See thaw(), freeze(), set_par() and get_par() for examples.
:param func: Sherpa function that takes a full parameter name specification and
optional args, e.g. set_par() used as set_par('mekal_7.kt', 2.0)
:param par: Param name or model compoent name ('mekal.kt' or 'mekal')
:param msg: Format string to indicate action.
:param *args: Optional function arguments
:rtype: numpy array of function return values ordered by shell
"""
vals = par.split('.')
name = vals[0]
parname = (vals[1] if len(vals) > 1 else None)
if len(vals) > 2:
raise ValueError('Invalid parameter name specification "%s"' % par)
retvals = [] # return values
processed = set()
for dataset in self.filter_datasets():
model_comps = dataset['model_comps']
if name in model_comps:
model_name = model_comps[name]['model_name']
fullparname = '{0}.{1}'.format(model_name, parname) if parname else model_name
if fullparname not in processed:
if msg is not None:
logger.info(msg % fullparname)
retvals.append(func(fullparname, *args, **kwargs))
processed.add(fullparname)
return retvals
def thaw(self, *pars):
"""Apply thaw command to specified parameters for each dataset.
:param pars: parameter specifiers in format <model_type>.<par_name>
:rtype: None
"""
for par in pars:
self._sherpa_par(ui.thaw, par, 'Thawing %s')
def freeze(self, *pars):
"""Apply freeze command to specified parameters for each dataset.
:param pars: parameter specifiers in format <model_type>.<par_name>
:rtype: None
"""
for par in pars:
self._sherpa_par(ui.freeze, par, 'Freezing %s')
def _get_parname_attr_pars(self, par, msg):
parts = par.split('.')
if len(parts) == 1:
raise ValueError('par="%s" must be in the form "name.par" or "name.par.attr"' % par)
parname = '.'.join(parts[:-1])
attr = parts[-1]
return parname, attr, self._sherpa_par(eval, parname, msg % attr)
def get_par(self, par):
"""Get parameter attribute value for each dataset.
:param par: parameter specifier in format <model_type>.<par_name>
:rtype: None
"""
parname, attr, pars = self._get_parname_attr_pars(par, 'Getting %%s.%s')
return numpy.array([getattr(x, attr) for x in pars])
def set_par(self, par, val):
"""Set parameter attribute value for each dataset.
:param par: parameter spec: <model_type>.<attr> or <model_type>.<par>.<attr>
:param val: parameter value
:rtype: None
"""
parname, attr, pars = self._get_parname_attr_pars(par, 'Setting %%%%s.%%s = %s' % val)
for x in pars:
setattr(x, attr, val)
def link(self, par):
datasets = self.filter_datasets()
name, parname = par.split('.')
fullparname0 = '{0}.{1}'.format(datasets[0]['model_comps'][name]['model_name'], parname)
for dataset in datasets[1:]:
fullparname = '{0}.{1}'.format(dataset['model_comps'][name]['model_name'], parname)
if fullparname != fullparname0:
logger.info('Linking {0} => {1}'.format(fullparname, fullparname0))
ui.link(fullparname, fullparname0)
def unlink(self, par):
self._sherpa_par(ui.unlink, par, 'Unlinking %s')
def _sherpa_fit_func(func):
def _fit(self, *args, **kwargs):
"""Fit simultaneously all the datasets in the stack using the current
source models.
:args: additional args that get passed to the sherpa fit() routine
:kwargs: additional keyword args that get passed to the sherpa fit() routine
:rtype: None
"""
ids = tuple(x['id'] for x in self.filter_datasets())
func(*(ids + args), **kwargs)
_fit.__name__ = func.__name__
_fit.__doc__ = func.__doc__
return _fit
fit_bkg = _sherpa_fit_func(ui.fit_bkg)
fit = _sherpa_fit_func(ui.fit)
conf = _sherpa_fit_func(ui.conf)
def _print_window(self, *args, **kwargs):
"""Save figure for each dataset.
If a filename is supplied (for saving to a set of files) it should
contain a ``#`` character, which will be replaced by the dataset ``id``
in each case.
:param args: list arguments to pass to print_window
:param kwargs: named (keyword) arguments to pass to print_window
:rtype: None
"""
orig_args = args
for dataset in self.filter_datasets():
args = orig_args
if len(args) > 0:
filename = re.sub(r'#', str(dataset['id']), args[0])
args = tuple([filename]) + args[1:]
if _plot_pkg == 'chips':
pychips.set_current_window(dataset['id'])
func = pychips.print_window
elif _plot_pkg == 'pylab':
plt.figure(self.ids.index(dataset['id']) + 1)
func = plt.savefig
else:
raise ValueError('Unknown plot package')
func(*args, **kwargs)
savefig = _print_window # matplotlib alias for print_window
def _sherpa_plot_func(func):
def _sherpa_plot(self, *args, **kwargs):
"""Call Sherpa plot ``func`` for each dataset.
:param func: Sherpa plot function
:param args: plot function list arguments
:param kwargs: plot function named (keyword) arguments
:rtype: None
"""
# FIXME We need to make sure ChIPS is initialized.
# As a hack, we just call begin() and end() on the chips backend
# To make sure the backend is initialized before we start creating windows.
if _plot_pkg == 'chips':
try:
chips_backend.begin()
finally:
chips_backend.end()
for dataset in self.filter_datasets():
if _plot_pkg == 'chips':
try:
pychips.add_window(['id', dataset['id']])
except RuntimeError:
pass # already exists
# window_id = pychips.ChipsId()
# window_id.window = dataset['id']
pychips.current_window(str(dataset['id']))
elif _plot_pkg == 'pylab':
plt.figure(self.ids.index(dataset['id']) + 1)
else:
raise ValueError('Unknown plot package')
func(dataset['id'], *args, **kwargs)
return _sherpa_plot
# log_scale = _sherpa_plot_func(pychips.log_scale)
# linear_scale = _sherpa_plot_func(pychips.linear_scale)
plot_arf = _sherpa_plot_func(ui.plot_arf)
plot_bkg_fit = _sherpa_plot_func(ui.plot_bkg_fit)
plot_bkg_ratio = _sherpa_plot_func(ui.plot_bkg_ratio)
plot_chisqr = _sherpa_plot_func(ui.plot_chisqr)
plot_fit_delchi = _sherpa_plot_func(ui.plot_fit_delchi)
plot_psf = _sherpa_plot_func(ui.plot_psf)
plot_bkg = _sherpa_plot_func(ui.plot_bkg)
plot_bkg_fit_delchi = _sherpa_plot_func(ui.plot_bkg_fit_delchi)
plot_bkg_resid = _sherpa_plot_func(ui.plot_bkg_resid)
plot_data = _sherpa_plot_func(ui.plot_data)
plot_fit_resid = _sherpa_plot_func(ui.plot_fit_resid)
plot_ratio = _sherpa_plot_func(ui.plot_ratio)
plot_bkg_chisqr = _sherpa_plot_func(ui.plot_bkg_chisqr)
plot_bkg_fit_resid = _sherpa_plot_func(ui.plot_bkg_fit_resid)
plot_bkg_source = _sherpa_plot_func(ui.plot_bkg_source)
plot_delchi = _sherpa_plot_func(ui.plot_delchi)
plot_model = _sherpa_plot_func(ui.plot_model)
plot_resid = _sherpa_plot_func(ui.plot_resid)
plot_bkg_delchi = _sherpa_plot_func(ui.plot_bkg_delchi)
plot_bkg_model = _sherpa_plot_func(ui.plot_bkg_model)
plot_bkg_source = _sherpa_plot_func(ui.plot_bkg_source)
plot_fit = _sherpa_plot_func(ui.plot_fit)
plot_order = _sherpa_plot_func(ui.plot_order)
plot_source = _sherpa_plot_func(ui.plot_source)
def _matplotlib_func(func, axis_cmd=False):
def _matplotlib(self, *args, **kwargs):
"""Call matplotlib plot ``func`` for each dataset.
:param func: Sherpa plot function
:param args: plot function list arguments
:param kwargs: plot function named (keyword) arguments
:rtype: None
"""
orig_args = args
for dataset in self.filter_datasets():
args = orig_args
if _plot_pkg != 'pylab':
raise ValueError('Plot package must be pylab')
if len(args) > 0:
try:
arg0 = re.sub('#', str(dataset['id']), args[0])
args = tuple([arg0]) + args[1:]
except TypeError:
pass
plt.figure(self.ids.index(dataset['id']) + 1)
if axis_cmd:
ax = plt.gca()
getattr(ax, func)(*args, **kwargs)
else:
func(*args, **kwargs)
return _matplotlib
if _plot_pkg == 'pylab':
plot_savefig = _matplotlib_func(plt.savefig)
plot_xlabel = _matplotlib_func(plt.xlabel)
plot_ylabel = _matplotlib_func(plt.ylabel)
plot_title = _matplotlib_func(plt.title)
plot_xlim = _matplotlib_func(plt.xlim)
plot_ylim = _matplotlib_func(plt.ylim)
plot_set_xscale = _matplotlib_func('set_xscale', axis_cmd=True)
plot_set_yscale = _matplotlib_func('set_yscale', axis_cmd=True)
def query(self, func):
output = []
for dataset in self.filter_datasets():
id = dataset['id']
if func(ui.get_data(id)):
output.append(id)
return output
def query_by_header_keyword(self, keyword, value):
def func(dataset):
if hasattr(dataset, 'header'):
if keyword in dataset.header.keys() and dataset.header[keyword] == str(value):
return True
return False
return self.query(func)
def query_by_obsid(self, value):
return self.query_by_header_keyword('OBS_ID', value)
_always_wrapped = ('load_pha', 'load_arrays', 'load_ascii', 'load_data', 'load_bkg')
# Use this and subsequent loop to wrap every function in sherpa.astro.ui with a datastack version
def _sherpa_ui_wrap(func):
def wrap(*args, **kwargs):
wrapfunc = func
if args:
if isinstance(args[0], DataStack):
datastack, args = args[0], args[1:]
# If the first argument is a list and it's either empty or made of non-iterables, then it's a datastack definition.
# If the list contains iterable it must be arrays for load_arrays.
elif isinstance(args[0], list) and not (len(args[0])>0 and hasattr(args[0][0],'__iter__')):
datastack, args = (DATASTACK[args[0]] if args[0] else DATASTACK), args[1:]
else:
if func.__name__ in _always_wrapped:
# some (all?) load_* functions must always be wrapped for file stack syntax check
# and for ensuring dataset id consistency.
datastack = DATASTACK
else:
return func(*args, **kwargs) # No stack specifier so use native sherpa func
try:
wrapfunc = getattr(datastack, func.__name__)
except AttributeError:
raise AttributeError(
'{0} is not a stack-enabled function.'.format(func.__name__))
return wrapfunc(*args, **kwargs)
wrap.__name__ = func.__name__
wrap.__doc__ = func.__doc__
return wrap
def _datastack_wrap(func):
def wrap(*args, **kwargs):
if not args:
args = ([],) + args
if isinstance(args[0], DataStack):
datastack, args = args[0], args[1:]
elif isinstance(args[0], list) and not (len(args[0])>0 and hasattr(args[0][0],'__iter__')):
datastack, args = (DATASTACK[args[0]] if args[0] else DATASTACK), args[1:]
else:
datastack = DATASTACK
# raise TypeError('First argument to {0} must be a list or datastack')
return getattr(datastack, func.__name__)(*args, **kwargs)
wrap.__name__ = func.__name__
wrap.__doc__ = func.__doc__
return wrap
DATASTACK = DataStack() # Default datastack
# Wrap all sherpa UI funcs and a few DataStack methods for command-line interface.
_module = sys.modules[__name__]
for attr in dir(ui):
func = getattr(ui, attr)
if type(func) == types.FunctionType:
setattr(_module, attr, _sherpa_ui_wrap(func))
for funcname in ['clear_stack', 'show_stack', 'get_stack_ids', 'query', 'query_by_header_keyword', 'query_by_obsid']:
setattr(_module, funcname, _datastack_wrap(getattr(DataStack, funcname)))
def clean():
DATASTACK.clear_models()
DATASTACK.clear_stack()
ui.clean()
logger.warning("clean() will invalidate any existing DataStack instances by removing all the datasets from the " +
"Sherpa session")
| gpl-3.0 |
ch3ll0v3k/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
softwaremechanic/Miscellaneous | ml-samples-examples/Scikits_examples/tv_viewers_predict.py | 1 | 1306 | # Required Packages
import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
# Function to get data
def get_data(file_name):
data = pd.read_csv(file_name)
flash_x_parameter = []
flash_y_parameter = []
arrow_x_parameter = []
arrow_y_parameter = []
for x1,y1,x2,y2 in zip(data['flash_episode_number'],data['flash_us_viewers'],data['arrow_episode_number'],data['arrow_us_viewers']):
flash_x_parameter.append([float(x1)])
flash_y_parameter.append(float(y1))
arrow_x_parameter.append([float(x2)])
arrow_y_parameter.append(float(y2))
return flash_x_parameter,flash_y_parameter,arrow_x_parameter,arrow_y_parameter
# Function to know which Tv show will have more viewers
def more_viewers(x1,y1,x2,y2):
regr1 = linear_model.LinearRegression()
regr1.fit(x1, y1)
predicted_value1 = regr1.predict(9)
print predicted_value1
regr2 = linear_model.LinearRegression()
regr2.fit(x2, y2)
predicted_value2 = regr2.predict(9)
#print predicted_value1
#print predicted_value2
if predicted_value1 > predicted_value2:
print "The Flash Tv Show will have more viewers for next week"
else:
print "Arrow Tv Show will have more viewers for next week"
| gpl-2.0 |
SuLab/scheduled-bots | scheduled_bots/GHR/bot.py | 1 | 8238 | from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from wikidataintegrator.ref_handlers import update_retrieved_if_new_multiple_refs
import pandas as pd
from pandas import read_csv
import requests
from tqdm import trange, tqdm
import xml.etree.ElementTree as et
import time
from datetime import datetime
import copy
import os
datasrc = 'https://ghr.nlm.nih.gov/download/TopicIndex.xml'
## GHR inheritance codes to WD entities mapping
GHR_WD_codes = {'ac': 'Q13169788', ##wd:Q13169788 (codominant)
'ad': 'Q116406', ##wd:Q116406 (autosomal dominant)
'ar': 'Q15729064', ##wd:Q15729064 (autosomal recessive)
'm': 'Q15729075', ##wd:Q15729075 (mitochondrial)
'x': 'Q70899378', #wd:Q2597344 (X-linked inheritance)
'xd': 'Q3731276', ##wd:Q3731276 (X-linked dominant)
'xr': 'Q1988987', ##wd:Q1988987 (X-linked recessive)
'y': 'Q2598585'} ##wd:Q2598585 (Y linkage)
GHR_codes_no_WD = {'n': 'not inherited', 'u': 'unknown pattern'}
def create_reference(ghr_url):
refStatedIn = wdi_core.WDItemID(value="Q62606821", prop_nr="P248", is_reference=True)
timeStringNow = datetime.now().strftime("+%Y-%m-%dT00:00:00Z")
refRetrieved = wdi_core.WDTime(timeStringNow, prop_nr="P813", is_reference=True)
refURL = wdi_core.WDUrl(value=ghr_url, prop_nr="P854", is_reference=True)
return [refStatedIn, refRetrieved, refURL]
## Login for Scheduled bot
print("Logging in...")
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
## Retrieve topics from topic dump on NLM and parse
r = requests.get(datasrc)
xml = r.text
xtree = et.fromstring(xml)
topic_of_interest = 'Conditions'
for eachtopic in xtree.findall('topic'):
if eachtopic.attrib['id'] == topic_of_interest:
new_tree = eachtopic.find('topics')
conditions = new_tree
## Parse the topics url list
conditions_list = []
for condition in conditions.findall('topic'):
title = condition.find('title').text
url = condition.find('url').text
try:
synonyms = condition.find('other_names')
for synonym in synonyms:
tmpdict = {'title': title,'url':url,'aka':synonym.text}
conditions_list.append(tmpdict)
except:
tmpdict = {'title': title,'url':url,'aka':'None'}
conditions_list.append(tmpdict)
conditions_df = pd.DataFrame(conditions_list)
## Use NLM GHR API to pull xrefs and inheritance data
conditions_url_list = conditions_df['url'].unique().tolist()
condition_url_list_test = conditions_url_list[0:3]
inher_list = []
inher_fail = []
syn_fail = []
synonyms_df = pd.DataFrame(columns = ['topic','synonym'])
xref_list = []
xref_fail = []
u=0
for u in tqdm(range(len(conditions_url_list))):
eachurl = conditions_url_list[u]
tmpurl = eachurl+'?report=json'
tmpresponse = requests.get(tmpurl)
data = tmpresponse.json()
## save the inheritance pattern data
try:
pattern_nos = data['inheritance-pattern-list']
i=0
while i < len(pattern_nos):
inher_dict = pattern_nos[i]['inheritance-pattern']
inher_dict['topic']=data['name']
inher_dict['url'] = eachurl
inher_list.append(inher_dict)
i=i+1
except:
inher_fail.append({'topic':data['name'],'url':eachurl})
## save the synonym list
try:
synlist = data['synonym-list']
syndf = pd.DataFrame(synlist)
syndf['topic']=data['name']
synonyms_df = pd.concat((synonyms_df,syndf),ignore_index=True)
except:
syn_fail.append({'topic':data['name'],'url':eachurl})
## save the xrefs
try:
xreflist = data['db-key-list']
k=0
while k < len(xreflist):
tmpdict = xreflist[k]['db-key']
tmpdict['topic'] = data['name']
tmpdict['url'] = eachurl
xref_list.append(tmpdict)
k=k+1
except:
xref_fail.append({'topic':data['name'],'url':eachurl})
u=u+1
inheritance_df = pd.DataFrame(inher_list)
inher_fail_df = pd.DataFrame(inher_fail)
syn_fail_df = pd.DataFrame(syn_fail)
xref_list_df = pd.DataFrame(xref_list)
xref_fail_df = pd.DataFrame(xref_fail)
#### Use xrefs pulled from the API to map the url to Wikidata Entities
## Drop topics that map to the same url (assuming they're synonyms)
xref_no_dups = xref_list_df.drop_duplicates()
print("original df size: ",len(xref_list_df),"de-duplicated url df size: ",len(xref_no_dups))
## Use Orphanet IDs to pull Wikidata Entities
## Generate list of unique Orphanet IDs
orphanet_ghr = xref_no_dups.loc[xref_no_dups['db']=='Orphanet']
no_orphanet_dups = orphanet_ghr.drop_duplicates('url')
print("Original Orphanet Xref list: ", len(orphanet_ghr), "Orphanet Xref list less dups: ",len(no_orphanet_dups))
orphanet_id_list = no_orphanet_dups['key'].tolist()
# Retrieve the QIDs for each Orphanet ID (The property for Orphanet IDs is P1550)
i=0
wdmap = []
wdmapfail = []
for i in tqdm(range(len(orphanet_id_list))):
orph_id = orphanet_id_list[i]
try:
sparqlQuery = "SELECT * WHERE {?topic wdt:P1550 \""+orph_id+"\"}"
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
orpha_qid = result["results"]["bindings"][0]["topic"]["value"].replace("http://www.wikidata.org/entity/", "")
wdmap.append({'Orphanet':orph_id,'WDID':orpha_qid})
except:
wdmapfail.append(orph_id)
i=i+1
## Inspect the results for mapping or coverage issues
wdid_orpha_df = pd.DataFrame(wdmap)
print("resulting mapping table has: ",len(wdid_orpha_df)," rows.")
#### Add Mode of Inheritance data from GHR to Wikidata
## De-duplicate to remove anything with mapping issues
wd_orpha_no_dups = wdid_orpha_df.drop_duplicates('Orphanet').copy()
wd_orpha_no_dups = wd_orpha_no_dups.drop_duplicates('WDID')
print('de-duplicated table: ',len(wd_orpha_no_dups))
## Merge with Inheritance table
no_orphanet_dups.rename(columns={'key':'Orphanet'}, inplace=True)
inher_wd_db = inheritance_df.merge(wd_orpha_no_dups.merge(no_orphanet_dups,on='Orphanet',how='inner'), on=['url','topic'], how='inner')
print("resulting mapped table: ",len(inher_wd_db))
## Limit adding mode of inheritance statements to diseases with known modes of inheritance
inheritance_avail = inher_wd_db.loc[(inher_wd_db['code']!='n')&(inher_wd_db['code']!='u')]
print(len(inheritance_avail))
## Perform the entity look up and write the inheritance mode statement
i=0
for i in tqdm(range(len(inheritance_avail))):
disease_qid = inheritance_avail.iloc[i]['WDID']
inheritance_method = GHR_WD_codes[inheritance_avail.iloc[i]['code']]
ghr_url = inheritance_avail.iloc[i]['url']
reference = create_reference(ghr_url)
statement = [wdi_core.WDItemID(value=inheritance_method, prop_nr="P1199", references=[copy.deepcopy(reference)])]
item = wdi_core.WDItemEngine(wd_item_id=disease_qid, data=statement, append_value="P1199",
global_ref_mode='CUSTOM', ref_handler=update_retrieved_if_new_multiple_refs)
item.write(login)
i=i+1
#### Add GHR disease/conditions urls (once property has been created and approved)
## Load successfully mapped GHR disease urls
mapped_orpha_urls = wd_orpha_no_dups.merge(no_orphanet_dups,on='Orphanet',how='inner')
i=0
for i in tqdm(range(len(mapped_orpha_urls))):
disease_qid = mapped_orpha_urls.iloc[i]['WDID']
ghr_url = mapped_orpha_urls.iloc[i]['url']
ghr_id = mapped_orpha_urls.iloc[i]['url'].replace("https://ghr.nlm.nih.gov/condition/","")  # use row i, not row 0
reference = create_reference(ghr_url)
url_prop = "P7464"
statement = [wdi_core.WDString(value=ghr_id, prop_nr=url_prop, references=[copy.deepcopy(reference)])]
item = wdi_core.WDItemEngine(wd_item_id=disease_qid, data=statement, append_value=url_prop,
global_ref_mode='CUSTOM', ref_handler=update_retrieved_if_new_multiple_refs)
item.write(login)
i=i+1
| mit |
gespinoza/davgis | functions.py | 3 | 29444 | # -*- coding: utf-8 -*-
"""
Authors: Gonzalo E. Espinoza-Dávalos
Contact: [email protected], [email protected]
Repository: https://github.com/gespinoza/davgis
Module: davgis
Description:
This module is a python wrapper to simplify scripting and automation of common
GIS workflows used in water resources.
"""
from __future__ import division
import os
import math
import tempfile
import warnings
import ogr
import osr
import gdal
import pandas as pd
import netCDF4
from scipy.interpolate import griddata
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
np = pd.np
def Buffer(input_shp, output_shp, distance):
"""
Creates a buffer of the input shapefile by a given distance
"""
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_lyr_defn = inp_lyr.GetLayerDefn()
inp_srs = inp_lyr.GetSpatialRef()
# Output
out_name = os.path.splitext(os.path.basename(output_shp))[0]
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(output_shp):
out_driver.DeleteDataSource(output_shp)
out_source = out_driver.CreateDataSource(output_shp)
out_lyr = out_source.CreateLayer(out_name, inp_srs, ogr.wkbPolygon)
out_lyr_defn = out_lyr.GetLayerDefn()
# Add fields
for i in range(inp_lyr_defn.GetFieldCount()):
field_defn = inp_lyr_defn.GetFieldDefn(i)
out_lyr.CreateField(field_defn)
# Add features
for i in range(inp_lyr.GetFeatureCount()):
feature_inp = inp_lyr.GetNextFeature()
geometry = feature_inp.geometry()
feature_out = ogr.Feature(out_lyr_defn)
for j in range(0, out_lyr_defn.GetFieldCount()):
feature_out.SetField(out_lyr_defn.GetFieldDefn(j).GetNameRef(),
feature_inp.GetField(j))
feature_out.SetGeometry(geometry.Buffer(distance))
out_lyr.CreateFeature(feature_out)
feature_out = None
# Save and/or close the data sources
inp_source = None
out_source = None
# Return
return output_shp
def Feature_to_Raster(input_shp, output_tiff,
cellsize, field_name=False, NoData_value=-9999):
"""
Converts a shapefile into a raster
"""
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
# Extent
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
x_ncells = int((x_max - x_min) / cellsize)
y_ncells = int((y_max - y_min) / cellsize)
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal.GDT_Int16)
out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
out_source.SetProjection(inp_srs.ExportToWkt())
out_lyr = out_source.GetRasterBand(1)
out_lyr.SetNoDataValue(NoData_value)
# Rasterize
if field_name:
gdal.RasterizeLayer(out_source, [1], inp_lyr,
options=["ATTRIBUTE={0}".format(field_name)])
else:
gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
# Save and/or close the data sources
inp_source = None
out_source = None
# Return
return output_tiff
def List_Fields(input_lyr):
"""
Lists the field names of input layer
"""
# Input
if isinstance(input_lyr, str):
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_lyr, 0)
inp_lyr = inp_source.GetLayer()
inp_lyr_defn = inp_lyr.GetLayerDefn()
elif isinstance(input_lyr, ogr.Layer):
inp_lyr_defn = input_lyr.GetLayerDefn()
# List
names_ls = []
# Loop
for j in range(0, inp_lyr_defn.GetFieldCount()):
field_defn = inp_lyr_defn.GetFieldDefn(j)
names_ls.append(field_defn.GetName())
# Save and/or close the data sources
inp_source = None
# Return
return names_ls
def Raster_to_Array(input_tiff, ll_corner, x_ncells, y_ncells,
values_type='float32'):
"""
Loads a raster into a numpy array
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_data_type = inp_band.DataType
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
ll_x = ll_corner[0]
ll_y = ll_corner[1]
top_left_x = ll_x
top_left_y = ll_y - cellsize_y*y_ncells
# Change start point
temp_path = tempfile.mkdtemp()
temp_driver = gdal.GetDriverByName('GTiff')
temp_tiff = os.path.join(temp_path, os.path.basename(input_tiff))
temp_source = temp_driver.Create(temp_tiff, x_ncells, y_ncells,
1, inp_data_type)
temp_source.GetRasterBand(1).SetNoDataValue(NoData_value)
temp_source.SetGeoTransform((top_left_x, cellsize_x, rot_1,
top_left_y, rot_2, cellsize_y))
temp_source.SetProjection(inp_srs)
# Snap
gdal.ReprojectImage(inp_lyr, temp_source, inp_srs, inp_srs,
gdal.GRA_Bilinear)
temp_source = None
# Read array
d_type = pd.np.dtype(values_type)
out_lyr = gdal.Open(temp_tiff)
array = out_lyr.ReadAsArray(0, 0, out_lyr.RasterXSize,
out_lyr.RasterYSize).astype(d_type)
array[pd.np.isclose(array, NoData_value)] = pd.np.nan
out_lyr = None
return array
def Resample(input_tiff, output_tiff, cellsize, method=None,
NoData_value=-9999):
"""
Resamples a raster to a different spatial resolution
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
# NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
x_ncells = int(math.floor(x_tot_n * (cellsize_x/cellsize)))
y_ncells = int(math.floor(y_tot_n * (-cellsize_y/cellsize)))
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_source.GetRasterBand(1).SetNoDataValue(NoData_value)
out_source.SetGeoTransform((top_left_x, cellsize, rot_1,
top_left_y, rot_2, -cellsize))
out_source.SetProjection(inp_srs)
# Resampling
method_dict = {'NearestNeighbour': gdal.GRA_NearestNeighbour,
'Bilinear': gdal.GRA_Bilinear,
'Cubic': gdal.GRA_Cubic,
'CubicSpline': gdal.GRA_CubicSpline,
'Lanczos': gdal.GRA_Lanczos,
'Average': gdal.GRA_Average,
'Mode': gdal.GRA_Mode}
if method in range(7):  # 0-6 cover the GDAL GRA_* constants, including Mode
method_sel = method
elif method in method_dict.keys():
method_sel = method_dict[method]
else:
warnings.warn('Using default interpolation method: Nearest Neighbour')
method_sel = 0
gdal.ReprojectImage(inp_lyr, out_source, inp_srs, inp_srs, method_sel)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Array_to_Raster(input_array, output_tiff, ll_corner, cellsize,
srs_wkt):
"""
Saves an array into a raster file
"""
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
y_ncells, x_ncells = input_array.shape
gdal_datatype = gdaltype_from_dtype(input_array.dtype)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal_datatype)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(-9999)
out_top_left_x = ll_corner[0]
out_top_left_y = ll_corner[1] + cellsize*y_ncells
out_source.SetGeoTransform((out_top_left_x, cellsize, 0,
out_top_left_y, 0, -cellsize))
out_source.SetProjection(str(srs_wkt))
out_band.WriteArray(input_array)
# Save and/or close the data sources
out_source = None
# Return
return output_tiff
def Clip(input_tiff, output_tiff, bbox):
"""
Clips a raster given a bounding box
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
# Bounding box
xmin, ymin, xmax, ymax = bbox
# Get indices, number of cells, and top left corner
x1 = max([0, int(math.floor((xmin - top_left_x)/cellsize_x))])
x2 = min([x_tot_n, int(math.ceil((xmax - top_left_x)/cellsize_x))])
y1 = max([0, int(math.floor((ymax - top_left_y)/cellsize_y))])
y2 = min([y_tot_n, int(math.ceil((ymin - top_left_y)/cellsize_y))])
x_ncells = x2 - x1
y_ncells = y2 - y1
out_top_left_x = top_left_x + x1*cellsize_x
out_top_left_y = top_left_y + y1*cellsize_y
# Output
out_array = inp_array[y1:y2, x1:x2]
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform((out_top_left_x, cellsize_x, rot_1,
out_top_left_y, rot_2, cellsize_y))
out_source.SetProjection(inp_srs)
out_band.WriteArray(out_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Raster_to_Points(input_tiff, output_shp):
"""
Converts a raster to a point shapefile
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
top_left_x = transform[0]
cellsize_x = transform[1]
top_left_y = transform[3]
cellsize_y = transform[5]
NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
top_left_x_center = top_left_x + cellsize_x/2.0
top_left_y_center = top_left_y + cellsize_y/2.0
# Read array
array = inp_lyr.ReadAsArray(0, 0, x_tot_n, y_tot_n) # .astype(pd.np.float)
array[pd.np.isclose(array, NoData_value)] = pd.np.nan
# Output
out_srs = osr.SpatialReference()
out_srs.ImportFromWkt(inp_srs)
out_name = os.path.splitext(os.path.basename(output_shp))[0]
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(output_shp):
out_driver.DeleteDataSource(output_shp)
out_source = out_driver.CreateDataSource(output_shp)
out_lyr = out_source.CreateLayer(out_name, out_srs, ogr.wkbPoint)
ogr_field_type = ogrtype_from_dtype(array.dtype)
Add_Field(out_lyr, "RASTERVALU", ogr_field_type)
out_lyr_defn = out_lyr.GetLayerDefn()
# Add features
for xi in range(x_tot_n):
for yi in range(y_tot_n):
value = array[yi, xi]
if ~pd.np.isnan(value):
feature_out = ogr.Feature(out_lyr_defn)
feature_out.SetField2(0, value)
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(top_left_x_center + xi*cellsize_x,
top_left_y_center + yi*cellsize_y)
feature_out.SetGeometry(point)
out_lyr.CreateFeature(feature_out)
feature_out = None
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_shp
def Add_Field(input_lyr, field_name, ogr_field_type):
"""
Add a field to a layer using the following ogr field types:
0 = ogr.OFTInteger
1 = ogr.OFTIntegerList
2 = ogr.OFTReal
3 = ogr.OFTRealList
4 = ogr.OFTString
5 = ogr.OFTStringList
6 = ogr.OFTWideString
7 = ogr.OFTWideStringList
8 = ogr.OFTBinary
9 = ogr.OFTDate
10 = ogr.OFTTime
11 = ogr.OFTDateTime
"""
# List fields
fields_ls = List_Fields(input_lyr)
# Check if field exist
if field_name in fields_ls:
raise Exception('Field: "{0}" already exists'.format(field_name))
# Create field
inp_field = ogr.FieldDefn(field_name, ogr_field_type)
input_lyr.CreateField(inp_field)
return inp_field
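# Example (illustrative): Add_Field(out_lyr, "RASTERVALU", 2) adds a real-valued
# (ogr.OFTReal) field, mirroring what Raster_to_Points() above does through
# ogrtype_from_dtype().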
def Spatial_Reference(epsg, return_string=True):
"""
Obtain a spatial reference from the EPSG parameter
"""
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
if return_string:
return srs.ExportToWkt()
else:
return srs
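# Example: Spatial_Reference(4326) returns the WKT string for WGS 84
# (geographic lat/lon); pass return_string=False to get the osr object instead.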
def List_Datasets(path, ext):
"""
List the data sets in a folder
"""
datsets_ls = []
for f in os.listdir(path):
if os.path.splitext(f)[1][1:] == ext:
datsets_ls.append(f)
return datsets_ls
def NetCDF_to_Raster(input_nc, output_tiff, ras_variable,
x_variable='longitude', y_variable='latitude',
crs={'variable': 'crs', 'wkt': 'crs_wkt'}, time=None):
"""
Extract a layer from a netCDF file and save it as a raster file.
For temporal netcdf files, use the 'time' parameter as:
t = {'variable': 'time_variable', 'value': '30/06/2017'}
"""
# Input
inp_nc = netCDF4.Dataset(input_nc, 'r')
inp_values = inp_nc.variables[ras_variable]
x_index = inp_values.dimensions.index(x_variable)
y_index = inp_values.dimensions.index(y_variable)
if not time:
inp_array = inp_values[:]
else:
time_variable = time['variable']
time_value = time['value']
t_index = inp_values.dimensions.index(time_variable)
time_index = list(inp_nc.variables[time_variable][:]).index(time_value)
if t_index == 0:
inp_array = inp_values[time_index, :, :]
elif t_index == 1:
inp_array = inp_values[:, time_index, :]
elif t_index == 2:
inp_array = inp_values[:, :, time_index]
else:
raise Exception("The array has more dimensions than expected")
# Transpose array if necessary
if y_index > x_index:
inp_array = pd.np.transpose(inp_array)
# Additional parameters
gdal_datatype = gdaltype_from_dtype(inp_array.dtype)
NoData_value = inp_nc.variables[ras_variable]._FillValue
if type(crs) == str:
srs_wkt = crs
else:
crs_variable = crs['variable']
crs_wkt = crs['wkt']
exec('srs_wkt = str(inp_nc.variables["{0}"].{1})'.format(crs_variable,
crs_wkt))
inp_x = inp_nc.variables[x_variable]
inp_y = inp_nc.variables[y_variable]
cellsize_x = abs(pd.np.mean([inp_x[i] - inp_x[i-1]
for i in range(1, len(inp_x))]))
cellsize_y = -abs(pd.np.mean([inp_y[i] - inp_y[i-1]
for i in range(1, len(inp_y))]))
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
y_ncells, x_ncells = inp_array.shape
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal_datatype)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(pd.np.asscalar(NoData_value))
out_top_left_x = inp_x[0] - cellsize_x/2.0
if inp_y[-1] > inp_y[0]:
out_top_left_y = inp_y[-1] - cellsize_y/2.0
inp_array = pd.np.flipud(inp_array)
else:
out_top_left_y = inp_y[0] - cellsize_y/2.0
out_source.SetGeoTransform((out_top_left_x, cellsize_x, 0,
out_top_left_y, 0, cellsize_y))
out_source.SetProjection(srs_wkt)
out_band.WriteArray(inp_array)
out_band.ComputeStatistics(True)
# Save and/or close the data sources
inp_nc.close()
out_source = None
# Return
return output_tiff
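# Illustrative call (file, variable and attribute names are assumptions):
#
# NetCDF_to_Raster('precip.nc', 'precip_2017_06_30.tif', 'P',
#                  x_variable='longitude', y_variable='latitude',
#                  crs={'variable': 'crs', 'wkt': 'crs_wkt'},
#                  time={'variable': 'time', 'value': '30/06/2017'})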
def Apply_Filter(input_tiff, output_tiff, number_of_passes):
"""
Smooth a raster by replacing cell value by the average value of the
surrounding cells
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
x_ncells = inp_lyr.RasterXSize
y_ncells = inp_lyr.RasterYSize
# Filter
inp_array[inp_array == NoData_value] = pd.np.nan
out_array = array_filter(inp_array, number_of_passes)
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform((top_left_x, cellsize_x, rot_1,
top_left_y, rot_2, cellsize_y))
out_source.SetProjection(inp_srs)
out_band.WriteArray(out_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Extract_Band(input_tiff, output_tiff, band_number=1):
"""
Extract and save a raster band into a new raster
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(band_number)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
NoData_value = inp_band.GetNoDataValue()
x_ncells = inp_lyr.RasterXSize
y_ncells = inp_lyr.RasterYSize
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform(inp_transform)
out_source.SetProjection(inp_srs)
out_band.WriteArray(inp_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Get_Extent(input_lyr):
"""
Obtain the input layer extent (xmin, ymin, xmax, ymax)
"""
# Input
filename, ext = os.path.splitext(input_lyr)
if ext.lower() == '.shp':
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_lyr)
inp_lyr = inp_source.GetLayer()
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
inp_lyr = None
inp_source = None
elif ext.lower() == '.tif':
inp_lyr = gdal.Open(input_lyr)
inp_transform = inp_lyr.GetGeoTransform()
x_min = inp_transform[0]
x_max = x_min + inp_transform[1] * inp_lyr.RasterXSize
y_max = inp_transform[3]
y_min = y_max + inp_transform[5] * inp_lyr.RasterYSize
inp_lyr = None
else:
raise Exception('The input data type is not recognized')
return (x_min, y_min, x_max, y_max)
def Interpolation_Default(input_shp, field_name, output_tiff,
method='nearest', cellsize=None):
'''
Interpolate point data into a raster
Available methods: 'nearest', 'linear', 'cubic'
'''
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
inp_wkt = inp_srs.ExportToWkt()
# Extent
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
ll_corner = [x_min, y_min]
if not cellsize:
cellsize = min(x_max - x_min, y_max - y_min)/25.0
x_ncells = int((x_max - x_min) / cellsize)
y_ncells = int((y_max - y_min) / cellsize)
# Feature points
x = []
y = []
z = []
for i in range(inp_lyr.GetFeatureCount()):
feature_inp = inp_lyr.GetNextFeature()
point_inp = feature_inp.geometry().GetPoint()
x.append(point_inp[0])
y.append(point_inp[1])
z.append(feature_inp.GetField(field_name))
x = pd.np.array(x)
y = pd.np.array(y)
z = pd.np.array(z)
# Grid
X, Y = pd.np.meshgrid(pd.np.linspace(x_min + cellsize/2.0,
x_max - cellsize/2.0,
x_ncells),
pd.np.linspace(y_min + cellsize/2.0,
y_max - cellsize/2.0,
y_ncells))
# Interpolate
out_array = griddata((x, y), z, (X, Y), method=method)
out_array = pd.np.flipud(out_array)
# Save raster
Array_to_Raster(out_array, output_tiff, ll_corner, cellsize, inp_wkt)
# Return
return output_tiff
def Kriging_Interpolation_Points(input_shp, field_name, output_tiff, cellsize,
bbox=None):
"""
Interpolate point data using Ordinary Kriging
Reference: https://cran.r-project.org/web/packages/automap/automap.pdf
"""
# Spatial reference
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
srs_wkt = inp_srs.ExportToWkt()
inp_source = None
# Temp folder
temp_dir = tempfile.mkdtemp()
temp_points_tiff = os.path.join(temp_dir, 'points_ras.tif')
# Points to raster
Feature_to_Raster(input_shp, temp_points_tiff,
cellsize, field_name, -9999)
# Raster extent
if bbox:
xmin, ymin, xmax, ymax = bbox
ll_corner = [xmin, ymin]
x_ncells = int(math.ceil((xmax - xmin)/cellsize))
y_ncells = int(math.ceil((ymax - ymin)/cellsize))
else:
    # gdal datasets have no GetExtent(); derive the extent from the geotransform
    temp_lyr = gdal.Open(temp_points_tiff)
    temp_transform = temp_lyr.GetGeoTransform()
    x_ncells = temp_lyr.RasterXSize
    y_ncells = temp_lyr.RasterYSize
    xmin = temp_transform[0]
    ymax = temp_transform[3]
    xmax = xmin + temp_transform[1]*x_ncells
    ymin = ymax + temp_transform[5]*y_ncells
    ll_corner = [xmin, ymin]
    temp_lyr = None
# Raster to array
points_array = Raster_to_Array(temp_points_tiff, ll_corner,
x_ncells, y_ncells, values_type='float32')
# Run kriging
x_vector = np.arange(xmin + cellsize/2, xmax + cellsize/2, cellsize)
y_vector = np.arange(ymin + cellsize/2, ymax + cellsize/2, cellsize)
out_array = Kriging_Interpolation_Array(points_array, x_vector, y_vector)
# Save array as raster
Array_to_Raster(out_array, output_tiff, ll_corner, cellsize, srs_wkt)
# Return
return output_tiff
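# Illustrative call (file names, field name and bounding box are assumptions):
#
# Kriging_Interpolation_Points('infz_points.shp', 'INFZ', 'infz_kriged.tif',
#                              cellsize=250.0,
#                              bbox=[300000.0, 8500000.0, 320000.0, 8520000.0])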
def Kriging_Interpolation_Array(input_array, x_vector, y_vector):
"""
Interpolate data in an array using Ordinary Kriging
Reference: https://cran.r-project.org/web/packages/automap/automap.pdf
"""
# Total values in array
n_values = np.isfinite(input_array).sum()
# Load function
pandas2ri.activate()
robjects.r('''
library(gstat)
library(sp)
library(automap)
kriging_interpolation <- function(x_vec, y_vec, values_arr,
n_values){
# Parameters
shape <- dim(values_arr)
counter <- 1
df <- data.frame(X=numeric(n_values),
Y=numeric(n_values),
INFZ=numeric(n_values))
# Save values into a data frame
for (i in seq(shape[2])) {
for (j in seq(shape[1])) {
if (is.finite(values_arr[j, i])) {
df[counter,] <- c(x_vec[i], y_vec[j], values_arr[j, i])
counter <- counter + 1
}
}
}
# Grid
coordinates(df) = ~X+Y
int_grid <- expand.grid(x_vec, y_vec)
names(int_grid) <- c("X", "Y")
coordinates(int_grid) = ~X+Y
gridded(int_grid) = TRUE
# Kriging
krig_output <- autoKrige(INFZ~1, df, int_grid)
# Array
values_out <- matrix(krig_output$krige_output$var1.pred,
nrow=length(y_vec),
ncol=length(x_vec),
byrow = TRUE)
return(values_out)
}
''')
kriging_interpolation = robjects.r['kriging_interpolation']
# Execute kriging function and get array
r_array = kriging_interpolation(x_vector, y_vector, input_array, n_values)
array_out = np.array(r_array)
# Return
return array_out
def get_neighbors(x, y, nx, ny, cells=1):
"""
Get a list of neighboring cells
"""
neighbors_ls = [(xi, yi)
for xi in range(x - 1 - cells + 1, x + 2 + cells - 1)
for yi in range(y - 1 - cells + 1, y + 2 + cells - 1)
if (-1 < x <= nx - 1 and -1 < y <= ny - 1 and
(x != xi or y != yi) and
(0 <= xi <= nx - 1) and (0 <= yi <= ny - 1))]
return neighbors_ls
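# Worked example: get_neighbors(0, 0, 3, 3) returns the three cells adjacent
# to the top-left corner of a 3x3 grid -> [(0, 1), (1, 0), (1, 1)].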
def get_mean_neighbors(array, index, include_cell=False):
"""
Get the mean value of neighboring cells
"""
xi, yi = index
nx, ny = array.shape
stay = True
cells = 1
while stay:
neighbors_ls = get_neighbors(xi, yi, nx, ny, cells)
if include_cell:
neighbors_ls = neighbors_ls + [(xi, yi)]
values_ls = [array[i] for i in neighbors_ls]
if pd.np.isnan(values_ls).all():
cells += 1
else:
value = pd.np.nanmean(values_ls)
stay = False
return value
def array_filter(array, number_of_passes=1):
"""
Smooth cell values by replacing each cell value by the average value of the
surrounding cells
"""
while number_of_passes >= 1:
ny, nx = array.shape
arrayf = pd.np.empty(array.shape)
arrayf[:] = pd.np.nan
for j in range(ny):
for i in range(nx):
arrayf[j, i] = get_mean_neighbors(array, (j, i), True)
array[:] = arrayf[:]
number_of_passes -= 1
return arrayf
def ogrtype_from_dtype(d_type):
"""
Return the ogr data type from the numpy dtype
"""
# ogr field type
if 'float' in d_type.name:
ogr_data_type = 2
elif 'int' in d_type.name:
ogr_data_type = 0
elif 'string' in d_type.name:
ogr_data_type = 4
elif 'bool' in d_type.name:
ogr_data_type = 8
else:
raise Exception('"{0}" is not recognized'.format(d_type))
return ogr_data_type
def gdaltype_from_dtype(d_type):
"""
Return the gdal data type from the numpy dtype
"""
# gdal field type
if 'int8' == d_type.name:
gdal_data_type = 1
elif 'uint16' == d_type.name:
gdal_data_type = 2
elif 'int16' == d_type.name:
gdal_data_type = 3
elif 'uint32' == d_type.name:
gdal_data_type = 4
elif 'int32' == d_type.name:
gdal_data_type = 5
elif 'float32' == d_type.name:
gdal_data_type = 6
elif 'float64' == d_type.name:
gdal_data_type = 7
elif 'bool' in d_type.name:
gdal_data_type = 1
elif 'int' in d_type.name:
gdal_data_type = 5
elif 'float' in d_type.name:
gdal_data_type = 7
elif 'complex' == d_type.name:
gdal_data_type = 11
else:
warnings.warn('"{0}" is not recognized. '
'"Unknown" data type used'.format(d_type))
gdal_data_type = 0
return gdal_data_type
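# Example mappings (GDAL type codes): np.dtype('float32') -> 6 (GDT_Float32),
# np.dtype('int32') -> 5 (GDT_Int32), np.dtype('uint16') -> 2 (GDT_UInt16);
# unrecognised dtypes fall back to 0 (GDT_Unknown) with a warning.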
| apache-2.0 |
huzq/scikit-learn | sklearn/tests/test_metaestimators.py | 9 | 5337 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.utils._testing import assert_raises
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
from sklearn.exceptions import NotFittedError
class DelegatorData:
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
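# Reading the table above (illustrative sketch only): e.g. the first entry
# wraps an estimator as Pipeline([('est', est)]), so
#     DELEGATING_METAESTIMATORS[0].construct(some_estimator)
# should expose predict/transform/score exactly when ``some_estimator`` does,
# which is what test_metaestimator_delegation() below verifies.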
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
check_is_fitted(self)
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, y, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in SubEstimator.__dict__.keys()
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert hasattr(delegate, method)
assert hasattr(delegator, method), (
"%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises a NotFittedError
if method == 'score':
assert_raises(NotFittedError, getattr(delegator, method),
delegator_data.fit_args[0],
delegator_data.fit_args[1])
else:
assert_raises(NotFittedError, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
if method == 'score':
getattr(delegator, method)(delegator_data.fit_args[0],
delegator_data.fit_args[1])
else:
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert not hasattr(delegate, method)
assert not hasattr(delegator, method), (
"%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
tdhopper/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
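# Example: _weight_func(np.array([[1.0, 2.0]])) -> array([[1.0, 0.25]]), and a
# zero distance maps to inf under the errstate guard above.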
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
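# The precomputed-metric pattern exercised above, in short (illustrative only):
#     D_train = metrics.pairwise_distances(X, metric='euclidean')
#     nn = neighbors.NearestNeighbors(n_neighbors=3,
#                                     metric='precomputed').fit(D_train)
#     dist, ind = nn.kneighbors(metrics.pairwise_distances(Y, X))
# i.e. fit() receives an (n_samples, n_samples) distance matrix and
# kneighbors() an (n_queries, n_samples) one against the fitted samples.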
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since the user is expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
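# Note on the assertions above: radius_neighbors returns ragged results, i.e. a
# 1-D object array holding one integer index array per query point, even when a
# single query point is passed.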
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius-based neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
hammerlab/fancyimpute | fancyimpute/soft_impute.py | 2 | 6529 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.utils.extmath import randomized_svd
from sklearn.utils import check_array
from .common import masked_mae
from .solver import Solver
F32PREC = np.finfo(np.float32).eps
class SoftImpute(Solver):
"""
Implementation of the SoftImpute algorithm from:
"Spectral Regularization Algorithms for Learning Large Incomplete Matrices"
by Mazumder, Hastie, and Tibshirani.
"""
def __init__(
self,
shrinkage_value=None,
convergence_threshold=0.001,
max_iters=100,
max_rank=None,
n_power_iterations=1,
init_fill_method="zero",
min_value=None,
max_value=None,
normalizer=None,
verbose=True):
"""
Parameters
----------
shrinkage_value : float
Value by which we shrink singular values on each iteration. If
omitted then the default value will be the maximum singular
value of the initialized matrix (zeros for missing values) divided
by 50.
convergence_threshold : float
            Minimum relative difference between iterations (as a fraction of
the Frobenius norm of the current solution) before stopping.
max_iters : int
Maximum number of SVD iterations
max_rank : int, optional
Perform a truncated SVD on each iteration with this value as its
rank.
n_power_iterations : int
Number of power iterations to perform with randomized SVD
init_fill_method : str
How to initialize missing values of data matrix, default is
to fill them with zeros.
min_value : float
Smallest allowable value in the solution
max_value : float
Largest allowable value in the solution
normalizer : object
Any object (such as BiScaler) with fit() and transform() methods
verbose : bool
Print debugging info
"""
Solver.__init__(
self,
fill_method=init_fill_method,
min_value=min_value,
max_value=max_value,
normalizer=normalizer)
self.shrinkage_value = shrinkage_value
self.convergence_threshold = convergence_threshold
self.max_iters = max_iters
self.max_rank = max_rank
self.n_power_iterations = n_power_iterations
self.verbose = verbose
def _converged(self, X_old, X_new, missing_mask):
# check for convergence
old_missing_values = X_old[missing_mask]
new_missing_values = X_new[missing_mask]
difference = old_missing_values - new_missing_values
ssd = np.sum(difference ** 2)
old_norm = np.sqrt((old_missing_values ** 2).sum())
# edge cases
if old_norm == 0 or (old_norm < F32PREC and np.sqrt(ssd) > F32PREC):
return False
else:
return (np.sqrt(ssd) / old_norm) < self.convergence_threshold
def _svd_step(self, X, shrinkage_value, max_rank=None):
"""
Returns reconstructed X from low-rank thresholded SVD and
the rank achieved.
"""
if max_rank:
# if we have a max rank then perform the faster randomized SVD
(U, s, V) = randomized_svd(
X,
max_rank,
n_iter=self.n_power_iterations)
else:
            # perform a full-rank SVD with LAPACK (via np.linalg.svd)
(U, s, V) = np.linalg.svd(
X,
full_matrices=False,
compute_uv=True)
s_thresh = np.maximum(s - shrinkage_value, 0)
rank = (s_thresh > 0).sum()
s_thresh = s_thresh[:rank]
U_thresh = U[:, :rank]
V_thresh = V[:rank, :]
S_thresh = np.diag(s_thresh)
X_reconstruction = np.dot(U_thresh, np.dot(S_thresh, V_thresh))
return X_reconstruction, rank
def _max_singular_value(self, X_filled):
# quick decomposition of X_filled into rank-1 SVD
_, s, _ = randomized_svd(
X_filled,
1,
n_iter=5)
return s[0]
def solve(self, X, missing_mask):
X = check_array(X, force_all_finite=False)
X_init = X.copy()
X_filled = X
observed_mask = ~missing_mask
max_singular_value = self._max_singular_value(X_filled)
if self.verbose:
print("[SoftImpute] Max Singular Value of X_init = %f" % (
max_singular_value))
if self.shrinkage_value:
shrinkage_value = self.shrinkage_value
else:
# totally hackish heuristic: keep only components
# with at least 1/50th the max singular value
shrinkage_value = max_singular_value / 50.0
for i in range(self.max_iters):
X_reconstruction, rank = self._svd_step(
X_filled,
shrinkage_value,
max_rank=self.max_rank)
X_reconstruction = self.clip(X_reconstruction)
# print error on observed data
if self.verbose:
mae = masked_mae(
X_true=X_init,
X_pred=X_reconstruction,
mask=observed_mask)
print(
"[SoftImpute] Iter %d: observed MAE=%0.6f rank=%d" % (
i + 1,
mae,
rank))
converged = self._converged(
X_old=X_filled,
X_new=X_reconstruction,
missing_mask=missing_mask)
X_filled[missing_mask] = X_reconstruction[missing_mask]
if converged:
break
if self.verbose:
print("[SoftImpute] Stopped after iteration %d for lambda=%f" % (
i + 1,
shrinkage_value))
return X_filled
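# Minimal usage sketch (not part of the library API and never called here). It
# assumes a dense float matrix whose missing entries are given by a boolean
# mask, and drives solve() directly with a zero fill, mirroring
# init_fill_method="zero"; real callers normally go through the Solver entry
# point, which also handles filling and normalization.
def _example_soft_impute_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    missing_mask = rng.rand(*X.shape) < 0.3       # roughly 30% missing entries
    X_zero_filled = np.where(missing_mask, 0.0, X)
    imputer = SoftImpute(max_iters=50, verbose=False)
    # Returns X with the masked entries replaced by the low-rank reconstruction.
    return imputer.solve(X_zero_filled, missing_mask)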
| apache-2.0 |
wjlei1990/pyadjoint | doc/create_sphinx_files_for_adjoint_sources.py | 2 | 4397 | #!/usr/bin/env python
# -*- encoding: utf8 -*-
"""
This will create the sphinx input files for the various defined adjoint
sources.
:copyright:
Lion Krischer ([email protected]), 2015
:license:
BSD 3-Clause ("BSD New" or "BSD Simplified")
"""
import os
folder = "adjoint_sources"
if not os.path.exists(folder):
os.makedirs(folder)
import pyadjoint
TEMPLATE = """
{upper}
{name}
{lower}
{description}
{additional_parameters}
Usage
-----
.. doctest::
>>> import pyadjoint
>>> obs, syn = pyadjoint.utils.get_example_data()
>>> obs = obs.select(component="Z")[0]
>>> syn = syn.select(component="Z")[0]
>>> start, end = pyadjoint.utils.EXAMPLE_DATA_PDIFF
>>> adj_src = pyadjoint.calculate_adjoint_source(
... adj_src_type="{short_name}", observed=obs, synthetic=syn,
... min_period=20.0, max_period=100.0, left_window_border=start,
... right_window_border=end)
>>> print(adj_src)
{name} Adjoint Source for component Z at station SY.DBO
Misfit: 4.26e-11
Adjoint source available with 3600 samples
Example Plots
-------------
The following shows plots of the :doc:`../example_dataset` for some phases.
Pdif Phase on Vertical Component
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example contains *Pdif* and some surface reflected diffracted phases
recorded on the vertical component.
.. plot::
import pyadjoint
import matplotlib.pylab as plt
fig = plt.figure(figsize=(12, 7))
obs, syn = pyadjoint.utils.get_example_data()
obs = obs.select(component="Z")[0]
syn = syn.select(component="Z")[0]
start, end = pyadjoint.utils.EXAMPLE_DATA_PDIFF
pyadjoint.calculate_adjoint_source("{short_name}", obs, syn, 20.0, 100.0,
start, end, adjoint_src=True, plot=fig)
plt.show()
Sdif Phase on Transverse Component
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example contains *Sdif* and some surface reflected diffracted phases
recorded on the transverse component.
.. plot::
import pyadjoint
import matplotlib.pylab as plt
fig = plt.figure(figsize=(12, 7))
obs, syn = pyadjoint.utils.get_example_data()
obs = obs.select(component="T")[0]
syn = syn.select(component="T")[0]
start, end = pyadjoint.utils.EXAMPLE_DATA_SDIFF
pyadjoint.calculate_adjoint_source("{short_name}", obs, syn, 20.0, 100.0,
start, end, adjoint_src=True, plot=fig)
plt.show()
""".lstrip()
ADDITIONAL_PARAMETERS_TEMPLATE = """
Additional Parameters
---------------------
Additional parameters in addition to the default ones in the central
:func:`~pyadjoint.adjoint_source.calculate_adjoint_source` function:
{params}
""".strip()
srcs = pyadjoint.AdjointSource._ad_srcs
srcs = [(key, value) for key, value in srcs.items()]
srcs = sorted(srcs, key=lambda x: x[1][1])
for key, value in srcs:
filename = os.path.join(folder, "%s.rst" % key)
additional_params = ""
if value[3]:
additional_params = ADDITIONAL_PARAMETERS_TEMPLATE.format(
params=value[3])
with open(filename, "wt") as fh:
fh.write(TEMPLATE.format(
upper="=" * len(value[1].strip()),
name=value[1].strip(),
lower="=" * len(value[1].strip()),
description=value[2].lstrip(),
short_name=key,
additional_parameters=additional_params
))
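# At this point one reStructuredText file per registered adjoint-source key has
# been written to adjoint_sources/ (the file names follow the keys registered
# in pyadjoint, so the exact list depends on the installed version).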
INDEX = """
===============
Adjoint Sources
===============
``Pyadjoint`` can currently calculate the following misfits measurements and
associated adjoint sources:
.. toctree::
:maxdepth: 1
{contents}
Comparative Plots of All Available Adjoint Sources
--------------------------------------------------
Pdif Phase on Vertical Component
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example contains *Pdif* and some surface reflected diffracted phases
recorded on the vertical component.
.. plot:: plots/all_adjoint_sources_pdif.py
Sdif Phase on Transverse Component
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example contains *Sdif* and some surface reflected diffracted phases
recorded on the transverse component.
.. plot:: plots/all_adjoint_sources_sdif.py
""".lstrip()
index_filename = os.path.join(folder, "index.rst")
with open(index_filename, "wt") as fh:
fh.write(INDEX.format(
contents="\n ".join([_i[0] for _i in srcs])))
| bsd-3-clause |
talbrecht/pism_pik | examples/inverse/tauc_compare.py | 2 | 2423 | #!/usr/bin/env python
try:
import netCDF4 as netCDF
except ImportError:
    import sys
    print("netCDF4 is not installed!")
    sys.exit(1)
from matplotlib import pyplot as pp
from matplotlib import colors as mc
from optparse import OptionParser
from siple.reporting import endpause
import numpy as np
usage = """Usage: %prog [options]
Example: %prog -N 100 -n 0.1"""
parser = OptionParser(usage=usage)
parser.add_option("-i", "--input_file", type='string',
help='input file')
parser.add_option("-c", "--tauc_cap", type='float', default=200000,
help='maximum tauc value to display')
parser.add_option("-e", "--tauc_error_cap", type='float', default=0.2,
help='maximum relative error to display')
(options, args) = parser.parse_args()
try:
ds = netCDF.Dataset(options.input_file)
except:
print('ERROR: option -i is required')
parser.print_help()
exit(0)
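# seconds per year, used to convert velocities from m s^-1 to m a^-1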
secpera = 3.15569259747e7
tauc = ds.variables['tauc'][...].squeeze()
tauc_true = ds.variables['tauc_true'][...].squeeze()
tauc_diff = tauc - tauc_true
not_ice = abs(ds.variables['mask'][...].squeeze() - 2) > 0.01
tauc[not_ice] = 0
tauc_true[not_ice] = 0
tauc_diff[not_ice] = 0.
u_computed = ds.variables['u_computed'][...].squeeze() * secpera
v_computed = ds.variables['v_computed'][...].squeeze() * secpera
velbase_mag_computed = np.sqrt(u_computed * u_computed + v_computed * v_computed)
not_sliding = np.logical_and((abs(u_computed) < 10.), (abs(v_computed) < 10.))
tauc[not_ice] = 0
tauc_true[not_ice] = 0
tauc_diff[not_sliding] = 0.
# difference figure
pp.clf()
pp.imshow(tauc_diff.transpose() / tauc_true.transpose(), origin='lower', vmin=-options.tauc_error_cap, vmax=options.tauc_error_cap)
pp.title(r'$(\tau_c$ - true) / true')
pp.colorbar()
# side-by-side comparison
pp.figure()
pp.subplot(1, 2, 1)
pp.imshow(tauc.transpose(), origin='lower', vmin=0.0, vmax=options.tauc_cap)
pp.title(r'$\tau_c$ [from inversion]')
pp.colorbar()
pp.subplot(1, 2, 2)
pp.imshow(tauc_true.transpose(), origin='lower', vmin=0.0, vmax=options.tauc_cap)
pp.title(r'true $\tau_c$ [prior]')
pp.colorbar()
# show computed sliding speed
pp.figure()
im = pp.imshow(velbase_mag_computed.transpose(), origin='lower',
norm=mc.LogNorm(vmin=0.1, vmax=1000.0))
pp.title('computed sliding speed')
t = [0.1, 1.0, 10.0, 100.0, 1000.0]
pp.colorbar(im, ticks=t, format='$%.1f$')
# pp.ion()
pp.show()
# endpause()
| gpl-3.0 |
toobaz/pandas | pandas/tests/indexing/test_floats.py | 1 | 30613 | import numpy as np
import pytest
from pandas import DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
class TestFloatIndexers:
def check(self, result, original, indexer, getitem):
"""
comparator for results
we need to take care if we are indexing on a
Series or a frame
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
assert_almost_equal(result, expected)
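    # Behaviour exercised by the tests below, in short (sketch): on non-float
    # indexes ``s.iloc[3.0]`` raises TypeError ("Cannot index by location index
    # with a non-integer key"), while ``s.loc[3.0]`` raises KeyError or
    # TypeError depending on the index's inferred type.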
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
# but is specifically testing for the error
# message
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
tm.makeIntIndex,
tm.makeRangeIndex,
]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
msg = "Cannot index by location index"
with pytest.raises(TypeError, match=msg):
s.iloc[3.0]
msg = (
"cannot do positional indexing on {klass} with these "
r"indexers \[3\.0\] of {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]:
i = index(5)
for s in [
Series(np.arange(len(i)), index=i),
DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
]:
# getting
for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
                    # getitem on a DataFrame raises a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
msg = r"^3(\.0)?$"
else:
error = TypeError
msg = (
r"cannot do (label|index|positional) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}|"
"Cannot index by location index with a"
" non-integer key".format(klass=type(i), kind=str(float))
)
with pytest.raises(error, match=msg):
idxr(s)[3.0]
# label based can be a TypeError or KeyError
if s.index.inferred_type in ["string", "unicode", "mixed"]:
error = KeyError
msg = r"^3$"
else:
error = TypeError
msg = (
r"cannot do (label|index) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(error, match=msg):
s.loc[3.0]
# contains
assert 3.0 not in s
# setting with a float fails with iloc
msg = (
r"cannot do (label|index|positional) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
# setting with an indexer
if s.index.inferred_type in ["categorical"]:
# Value or Type Error
pass
elif s.index.inferred_type in ["datetime64", "timedelta64", "period"]:
                    # these should probably work
                    # but are currently inconsistent between Series and DataFrame
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
#
# with pytest.raises(TypeError):
# idxr(s2)[3.0] = 0
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
assert s2.index.is_object()
for idxr in [lambda x: x]:
s2 = s.copy()
idxr(s2)[3.0] = 0
assert s2.index.is_object()
# fallsback to position selection, series only
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
r"cannot do (label|index) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[3.0]
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=["a", "b", "c"])
s3 = Series([1, 2, 3], index=["a", "b", 1.5])
        # lookup in a pure string index
# with an invalid indexer
for idxr in [lambda x: x, lambda x: x.iloc]:
msg = (
r"cannot do label indexing"
r" on {klass} with these indexers \[1\.0\] of"
r" {kind}|"
"Cannot index by location index with a non-integer key".format(
klass=str(Index), kind=str(float)
)
)
with pytest.raises(TypeError, match=msg):
idxr(s2)[1.0]
with pytest.raises(KeyError, match=r"^1$"):
s2.loc[1.0]
result = s2.loc["b"]
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x]:
msg = (
r"cannot do label indexing"
r" on {klass} with these indexers \[1\.0\] of"
r" {kind}".format(klass=str(Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
result = idxr(s3)[1]
expected = 2
assert result == expected
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s3.iloc[1.0]
with pytest.raises(KeyError, match=r"^1$"):
s3.loc[1.0]
result = s3.loc[1.5]
expected = 3
assert result == expected
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for i in [Int64Index(range(5)), RangeIndex(5)]:
for s in [
Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
if isinstance(s, Series):
def compare(x, y):
assert x == y
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100, index=range(len(s)), name=3)
else:
expected = Series(100.0, index=range(len(s)), name=3)
s2 = s.copy()
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.0))
for s in [
Series(np.arange(len(index)), index=index),
DataFrame(
np.random.randn(len(index), len(index)), index=index, columns=index
),
]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
# getting
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# a random float not present in the index raises a KeyError
with pytest.raises(KeyError, match=r"^3\.5$"):
idxr(s)[3.5]
# contains
assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s.iloc[3.0]
msg = (
r"cannot do positional indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=str(Float64Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]:
index = index(5)
for s in [
Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index),
]:
# getitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[l]
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers"
r" \[(3|4)(\.0)?\]"
r" of ({kind_float}|{kind_int})".format(
klass=type(index),
kind_float=str(float),
kind_int=str(int),
)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[l] = 0
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers"
r" \[(3|4)(\.0)?\]"
r" of ({kind_float}|{kind_int})".format(
klass=type(index),
kind_float=str(float),
kind_int=str(int),
)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l] = 0
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
# oob indicates if we are out of bounds
# of positional indexing
for index, oob in [
(Int64Index(range(5)), False),
(RangeIndex(5), False),
(Int64Index(range(5)) + 10, True),
]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc]:
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# getitem out-of-bounds
for l in [slice(-6, 6), slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc]:
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[-6\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[slice(-6.0, 6.0)]
# getitem odd floats
for l, res1 in [
(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4)),
]:
for idxr in [lambda x: x.loc]:
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(2|3)\.5\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x, lambda x: x.iloc]:
for l in [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]:
klass = RangeIndex
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(2|4)\.0\] of"
" {kind}".format(klass=str(klass), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [Int64Index(range(5)), RangeIndex(5)]:
s = DataFrame(np.random.randn(5, 2), index=index)
def f(idxr):
# getitem
for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]:
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(0|1)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# getitem out-of-bounds
for l in [slice(-10, 10), slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[-10\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[slice(-10.0, 10.0)]
# getitem odd floats
for l, res in [
(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2)),
]:
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[0\.5\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
f(lambda x: x.loc)
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.0)) + 0.1
for s in [
Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index),
]:
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.loc, lambda x: x]:
# getitem
result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
assert s[3] == 2
assert s.loc[3] == 2
assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.loc[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.loc[5.0]
assert result1 == result2
assert result1 == result3
result1 = s[5]
result2 = s.loc[5]
result3 = s.loc[5]
assert result1 == result2
assert result1 == result3
assert s[5.0] == s[5]
# value not found (and no fallback at all)
# scalar integers
with pytest.raises(KeyError, match=r"^4\.0$"):
s.loc[4]
with pytest.raises(KeyError, match=r"^4\.0$"):
s.loc[4]
with pytest.raises(KeyError, match=r"^4\.0$"):
s[4]
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype="int64"))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.loc[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.loc[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([np.nan, 2, 4], index=[1.6, 5, 10]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([0.0, np.nan, np.nan], index=[0, 1, 2]))
result1 = s.loc[[2.5, 5]]
result2 = s.loc[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))
result1 = s[[2.5]]
result2 = s.loc[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1], index=[2.5]))
def test_floating_tuples(self):
# see gh-13509
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name="foo")
result = s[0.0]
assert result == (1, 1)
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name="foo")
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name="foo")
result = s[0.0]
tm.assert_series_equal(result, expected)
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {
256: 2321.0,
1: 78.0,
2: 2716.0,
3: 0.0,
4: 369.0,
5: 0.0,
6: 269.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 3536.0,
11: 0.0,
12: 24.0,
13: 0.0,
14: 931.0,
15: 0.0,
16: 101.0,
17: 78.0,
18: 9643.0,
19: 0.0,
20: 0.0,
21: 0.0,
22: 63761.0,
23: 0.0,
24: 446.0,
25: 0.0,
26: 34773.0,
27: 0.0,
28: 729.0,
29: 78.0,
30: 0.0,
31: 0.0,
32: 3374.0,
33: 0.0,
34: 1391.0,
35: 0.0,
36: 361.0,
37: 0.0,
38: 61808.0,
39: 0.0,
40: 0.0,
41: 0.0,
42: 6677.0,
43: 0.0,
44: 802.0,
45: 0.0,
46: 2691.0,
47: 0.0,
48: 3582.0,
49: 0.0,
50: 734.0,
51: 0.0,
52: 627.0,
53: 70.0,
54: 2584.0,
55: 0.0,
56: 324.0,
57: 0.0,
58: 605.0,
59: 0.0,
60: 0.0,
61: 0.0,
62: 3989.0,
63: 10.0,
64: 42.0,
65: 0.0,
66: 904.0,
67: 0.0,
68: 88.0,
69: 70.0,
70: 8172.0,
71: 0.0,
72: 0.0,
73: 0.0,
74: 64902.0,
75: 0.0,
76: 347.0,
77: 0.0,
78: 36605.0,
79: 0.0,
80: 379.0,
81: 70.0,
82: 0.0,
83: 0.0,
84: 3001.0,
85: 0.0,
86: 1630.0,
87: 7.0,
88: 364.0,
89: 0.0,
90: 67404.0,
91: 9.0,
92: 0.0,
93: 0.0,
94: 7685.0,
95: 0.0,
96: 1017.0,
97: 0.0,
98: 2831.0,
99: 0.0,
100: 2963.0,
101: 0.0,
102: 854.0,
103: 0.0,
104: 0.0,
105: 0.0,
106: 0.0,
107: 0.0,
108: 0.0,
109: 0.0,
110: 0.0,
111: 0.0,
112: 0.0,
113: 0.0,
114: 0.0,
115: 0.0,
116: 0.0,
117: 0.0,
118: 0.0,
119: 0.0,
120: 0.0,
121: 0.0,
122: 0.0,
123: 0.0,
124: 0.0,
125: 0.0,
126: 67744.0,
127: 22.0,
128: 264.0,
129: 0.0,
260: 197.0,
268: 0.0,
265: 0.0,
269: 0.0,
261: 0.0,
266: 1198.0,
267: 0.0,
262: 2629.0,
258: 775.0,
257: 0.0,
263: 0.0,
259: 0.0,
264: 163.0,
250: 10326.0,
251: 0.0,
252: 1228.0,
253: 0.0,
254: 2769.0,
255: 0.0,
}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
| bsd-3-clause |
yonglehou/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a
3-class dataset, and we classify it with a Support Vector classifier and with
L1- and L2-penalized logistic regression in either a One-Vs-Rest or multinomial
setting.
Logistic regression is not a multiclass classifier out of the box; as a
result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
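# Note: SVC exposes predict_proba only when constructed with probability=True,
# which is why it is enabled below (at the cost of extra internal
# cross-validation used to calibrate the probabilities).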
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
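# Flatten the grid into an (n_points, 2) array so predict_proba can be
# evaluated over the whole plane and reshaped back into a 100x100 image below.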
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
GGoussar/scikit-image | doc/examples/filters/plot_nonlocal_means.py | 13 | 1318 | """
=================================================
Non-local means denoising for preserving textures
=================================================
In this example, we denoise a detail of the astronaut image using the non-local
means filter. The non-local means algorithm replaces the value of a pixel by an
average of a selection of other pixels' values: small patches centered on the
other pixels are compared to the patch centered on the pixel of interest, and
the average is performed only for pixels that have patches close to the current
patch. As a result, this algorithm can restore textures well that would
otherwise be blurred by other denoising algorithms.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.restoration import denoise_nl_means
astro = img_as_float(data.astronaut())
astro = astro[30:180, 150:300]
noisy = astro + 0.3 * np.random.random(astro.shape)
noisy = np.clip(noisy, 0, 1)
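# The positional arguments below are, assuming the scikit-image API of this
# release, patch_size=7, patch_distance=9 and the cut-off distance h=0.08,
# which controls how strongly dissimilar patches are down-weighted.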
denoise = denoise_nl_means(noisy, 7, 9, 0.08)
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax[0].imshow(noisy)
ax[0].axis('off')
ax[0].set_title('noisy')
ax[1].imshow(denoise)
ax[1].axis('off')
ax[1].set_title('non-local means')
fig.tight_layout()
plt.show()
| bsd-3-clause |
CnrLwlss/HTSauto | HTSscripts/C2Merge.py | 1 | 2809 | import os
import sys
import pandas
import json
import argparse
def parseArgs():
parser=argparse.ArgumentParser(description="Gathers completed imagelog files for a QFA experiment, concatenates them and writes files to appropriate IMAGELOGS directory. Should be executed from LOGS3 directory. Requires C2.json file (i.e. run C2Find before running C2Merge).")
parser.add_argument("exptID", type=str, help="QFA experiment ID, e.g. QFA00001")
args = parser.parse_args()
return(args)
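# Example invocation (run from the LOGS3 directory, after C2Find has produced
# the <exptID>_C2.json file):
#   python C2Merge.py QFA00001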
def checkFile(f,verbose=True,deleteEmpty=False):
if os.path.isfile(f):
if sum(1 for line in open(f))<1:
if verbose: print(f+" EMPTY")
if deleteEmpty:
print("Deleting "+f)
os.remove(f)
return(False)
else:
if verbose: print(f+" MISSING")
return(False)
return(True)
def main():
args=parseArgs()
# Should execute this script from LOGS3 directory
rootDir=os.getcwd()
expt=str(args.exptID)
exptType=expt[0:-4]
dataDir=os.path.join(rootDir,exptType+"_EXPERIMENTS")
dictOut=os.path.join(dataDir,expt,"AUXILIARY",expt+'_C2.json')
print("Reading in dictionary describing image locations")
with open(dictOut, 'rb') as fp:
barcDict = json.load(fp)
print("Generating expected output filenames for images")
barcFiles=[item for sublist in barcDict.values() for item in sublist]
barcFiles.sort()
outFiles=[os.path.join(os.path.dirname(f),"Output_Data",os.path.basename(f).split(".")[0]+".out") for f in barcFiles]
datFiles=[os.path.join(os.path.dirname(f),"Output_Data",os.path.basename(f).split(".")[0]+".dat") for f in barcFiles]
for i,f in enumerate(outFiles):
outf=f
datf=datFiles[i]
checkout=checkFile(outf,deleteEmpty=True)
checkdat=checkFile(datf,deleteEmpty=True)
print("Reading in expected output files")
outDFs=[pandas.read_csv(f,sep="\t") if (os.path.isfile(f) and sum(1 for line in open(f))>0) else pandas.DataFrame() for f in outFiles]
datDFs=[pandas.read_csv(f,sep="\t",header=None) if (os.path.isfile(f) and sum(1 for line in open(f))>0) else pandas.DataFrame() for f in datFiles]
print("Merging output files")
outDF=pandas.concat(outDFs)
datDF=pandas.concat(datDFs)
print("Archiving existing output files in IMAGELOGS directory")
imlogs=os.path.join(dataDir,expt,"IMAGELOGS")
for f in os.listdir(imlogs):
if f.endswith(".out") or f.endswith(".dat"):
os.rename(os.path.join(imlogs,f),os.path.join(imlogs,f+"_ARCHIVE"))
print("Writing merged output to file")
outDF.to_csv(os.path.join(dataDir,expt,"IMAGELOGS",expt+"_Concatenated.out"),"\t",index=False,header=True)
datDF.to_csv(os.path.join(dataDir,expt,"IMAGELOGS",expt+"_Concatenated.dat"),"\t",index=False,header=False)
if __name__ == '__main__':
main()
| gpl-2.0 |
clarkfitzg/xray | xray/test/test_conventions.py | 2 | 23269 | import contextlib
import numpy as np
import pandas as pd
import warnings
from xray import conventions, Variable, Dataset, open_dataset
from xray.core import utils, indexing
from . import TestCase, requires_netCDF4, unittest
from .test_backends import CFEncodedDataTest
from xray.core.pycompat import iteritems
from xray.backends.memory import InMemoryDataStore
from xray.conventions import cf_encoder, cf_decoder, decode_cf
class TestMaskedAndScaledArray(TestCase):
def test(self):
x = conventions.MaskedAndScaledArray(np.arange(3), fill_value=0)
self.assertEqual(x.dtype, np.dtype('float'))
self.assertEqual(x.shape, (3,))
self.assertEqual(x.size, 3)
self.assertEqual(x.ndim, 1)
self.assertEqual(len(x), 3)
self.assertArrayEqual([np.nan, 1, 2], x)
x = conventions.MaskedAndScaledArray(np.arange(3), add_offset=1)
self.assertArrayEqual(np.arange(3) + 1, x)
x = conventions.MaskedAndScaledArray(np.arange(3), scale_factor=2)
self.assertArrayEqual(2 * np.arange(3), x)
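# Combined case: decoding applies value * scale_factor + add_offset and
# replaces entries equal to fill_value with NaN (the usual CF unpacking
# rule), hence -1 -> -1 * 0.01 + 1 = 0.99 and -99 -> NaN below.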
x = conventions.MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]),
-99, 0.01, 1)
expected = np.array([np.nan, 0.99, 1, 1.01, 1.02])
self.assertArrayEqual(expected, x)
def test_0d(self):
x = conventions.MaskedAndScaledArray(np.array(0), fill_value=0)
self.assertTrue(np.isnan(x))
self.assertTrue(np.isnan(x[...]))
x = conventions.MaskedAndScaledArray(np.array(0), fill_value=10)
self.assertEqual(0, x[...])
class TestCharToStringArray(TestCase):
def test_wrapper_class(self):
array = np.array(list('abc'), dtype='S')
actual = conventions.CharToStringArray(array)
expected = np.array('abc', dtype='S')
self.assertEqual(actual.dtype, expected.dtype)
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(actual.size, expected.size)
self.assertEqual(actual.ndim, expected.ndim)
with self.assertRaises(TypeError):
len(actual)
self.assertArrayEqual(expected, actual)
with self.assertRaises(IndexError):
actual[:2]
self.assertEqual(str(actual), 'abc')
array = np.array([list('abc'), list('cdf')], dtype='S')
actual = conventions.CharToStringArray(array)
expected = np.array(['abc', 'cdf'], dtype='S')
self.assertEqual(actual.dtype, expected.dtype)
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(actual.size, expected.size)
self.assertEqual(actual.ndim, expected.ndim)
self.assertEqual(len(actual), len(expected))
self.assertArrayEqual(expected, actual)
self.assertArrayEqual(expected[:1], actual[:1])
with self.assertRaises(IndexError):
actual[:, :2]
def test_char_to_string(self):
array = np.array([['a', 'b', 'c'], ['d', 'e', 'f']])
expected = np.array(['abc', 'def'])
actual = conventions.char_to_string(array)
self.assertArrayEqual(actual, expected)
expected = np.array(['ad', 'be', 'cf'])
actual = conventions.char_to_string(array.T) # non-contiguous
self.assertArrayEqual(actual, expected)
def test_string_to_char(self):
array = np.array([['ab', 'cd'], ['ef', 'gh']])
expected = np.array([[['a', 'b'], ['c', 'd']],
[['e', 'f'], ['g', 'h']]])
actual = conventions.string_to_char(array)
self.assertArrayEqual(actual, expected)
expected = np.array([[['a', 'b'], ['e', 'f']],
[['c', 'd'], ['g', 'h']]])
actual = conventions.string_to_char(array.T)
self.assertArrayEqual(actual, expected)
@np.vectorize
def _ensure_naive_tz(dt):
if hasattr(dt, 'tzinfo'):
return dt.replace(tzinfo=None)
else:
return dt
class TestDatetime(TestCase):
@requires_netCDF4
def test_cf_datetime(self):
import netCDF4 as nc4
for num_dates, units in [
(np.arange(10), 'days since 2000-01-01'),
(np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
(12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
# here we add a couple minor formatting errors to test
# the robustness of the parsing algorithm.
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), u'Hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
(10, 'days since 2000-01-01'),
([10], 'daYs since 2000-01-01'),
([[10]], 'days since 2000-01-01'),
([10, 10], 'days since 2000-01-01'),
(np.array(10), 'days since 2000-01-01'),
(0, 'days since 1000-01-01'),
([0], 'days since 1000-01-01'),
([[0]], 'days since 1000-01-01'),
(np.arange(2), 'days since 1000-01-01'),
(np.arange(0, 100000, 20000), 'days since 1900-01-01'),
(17093352.0, 'hours since 1-1-1 00:00:0.0'),
([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
(0, 'milliseconds since 2000-01-01T00:00:00'),
(0, 'microseconds since 2000-01-01T00:00:00'),
]:
for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:
expected = _ensure_naive_tz(nc4.num2date(num_dates, units, calendar))
print(num_dates, units, calendar)
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = conventions.decode_cf_datetime(num_dates, units,
calendar)
if (isinstance(actual, np.ndarray)
and np.issubdtype(actual.dtype, np.datetime64)):
# self.assertEqual(actual.dtype.kind, 'M')
# For some reason, numpy 1.8 does not compare ns precision
# datetime64 arrays as equal to arrays of datetime objects,
# but it works for us precision. Thus, convert to us
# precision for the actual array equal comparison...
actual_cmp = actual.astype('M8[us]')
else:
actual_cmp = actual
self.assertArrayEqual(expected, actual_cmp)
encoded, _, _ = conventions.encode_cf_datetime(actual, units,
calendar)
if '1-1-1' not in units:
# pandas parses this date very strangely, so the original
# units/encoding cannot be preserved in this case:
# (Pdb) pd.to_datetime('1-1-1 00:00:0.0')
# Timestamp('2001-01-01 00:00:00')
self.assertArrayEqual(num_dates, np.around(encoded, 1))
if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1
and '1000' not in units):
# verify that wrapping with a pandas.Index works
# note that it *does not* currently work to even put
# non-datetime64 compatible dates into a pandas.Index :(
encoded, _, _ = conventions.encode_cf_datetime(
pd.Index(actual), units, calendar)
self.assertArrayEqual(num_dates, np.around(encoded, 1))
def test_decoded_cf_datetime_array(self):
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual, expected)
# default calendar
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01')
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual, expected)
def test_slice_decoded_cf_datetime_array(self):
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual[slice(0, 2)], expected[slice(0, 2)])
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual[[0, 2]], expected[[0, 2]])
def test_decode_cf_datetime_non_standard_units(self):
expected = pd.date_range(periods=100, start='1970-01-01', freq='h')
# netCDFs from madis.noaa.gov use this format for their time units
# they cannot be parsed by netcdftime, but pd.Timestamp works
units = 'hours since 1-1-1970'
actual = conventions.decode_cf_datetime(np.arange(100), units)
self.assertArrayEqual(actual, expected)
def test_decode_cf_with_conflicting_fill_missing_value(self):
var = Variable(['t'], np.arange(10),
{'units': 'foobar',
'missing_value': 0,
'_FillValue': 1})
self.assertRaisesRegexp(ValueError, "_FillValue and missing_value",
lambda: conventions.decode_cf_variable(var))
@requires_netCDF4
def test_decode_cf_datetime_non_iso_strings(self):
# datetime strings that are _almost_ ISO compliant but not quite,
# but which netCDF4.num2date can still parse correctly
expected = pd.date_range(periods=100, start='2000-01-01', freq='h')
cases = [(np.arange(100), 'hours since 2000-01-01 0'),
(np.arange(100), 'hours since 2000-1-1 0'),
(np.arange(100), 'hours since 2000-01-01 0:00')]
for num_dates, units in cases:
actual = conventions.decode_cf_datetime(num_dates, units)
self.assertArrayEqual(actual, expected)
@requires_netCDF4
def test_decode_non_standard_calendar(self):
import netCDF4 as nc4
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23',
freq='H')
noleap_time = nc4.date2num(times.to_pydatetime(), units,
calendar=calendar)
expected = times.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = conventions.decode_cf_datetime(noleap_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())
@requires_netCDF4
def test_decode_non_standard_calendar_single_element(self):
units = 'days since 0001-01-01'
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = conventions.decode_cf_datetime(num_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
@requires_netCDF4
def test_decode_non_standard_calendar_single_element_fallback(self):
import netCDF4 as nc4
units = 'days since 0001-01-01'
dt = nc4.netcdftime.datetime(2001, 2, 29)
for calendar in ['360_day', 'all_leap', '366_day']:
num_time = nc4.date2num(dt, units, calendar)
with self.assertWarns('Unable to decode time axis'):
actual = conventions.decode_cf_datetime(num_time, units,
calendar=calendar)
expected = np.asarray(nc4.num2date(num_time, units, calendar))
print(num_time, calendar, actual, expected)
self.assertEqual(actual.dtype, np.dtype('O'))
self.assertEqual(expected, actual)
@requires_netCDF4
def test_decode_non_standard_calendar_multidim_time(self):
import netCDF4 as nc4
calendar = 'noleap'
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
noleap_time1 = nc4.date2num(times1.to_pydatetime(), units,
calendar=calendar)
noleap_time2 = nc4.date2num(times2.to_pydatetime(), units,
calendar=calendar)
mdim_time = np.empty((len(noleap_time1), 2), )
mdim_time[:, 0] = noleap_time1
mdim_time[:, 1] = noleap_time2
expected1 = times1.values
expected2 = times2.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = conventions.decode_cf_datetime(mdim_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
self.assertArrayEqual(actual[:, 0], expected1)
self.assertArrayEqual(actual[:, 1], expected2)
@requires_netCDF4
def test_decode_non_standard_calendar_fallback(self):
import netCDF4 as nc4
# ensure leap year doesn't matter
for year in [2010, 2011, 2012, 2013, 2014]:
for calendar in ['360_day', '366_day', 'all_leap']:
calendar = '360_day'
units = 'days since {0}-01-01'.format(year)
num_times = np.arange(100)
expected = nc4.num2date(num_times, units, calendar)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
actual = conventions.decode_cf_datetime(num_times, units,
calendar=calendar)
self.assertEqual(len(w), 1)
self.assertIn('Unable to decode time axis',
str(w[0].message))
self.assertEqual(actual.dtype, np.dtype('O'))
self.assertArrayEqual(actual, expected)
def test_cf_datetime_nan(self):
for num_dates, units, expected_list in [
([np.nan], 'days since 2000-01-01', ['NaT']),
([np.nan, 0], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z']),
([np.nan, 0, 1], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),
]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN')
actual = conventions.decode_cf_datetime(num_dates, units)
expected = np.array(expected_list, dtype='datetime64[ns]')
self.assertArrayEqual(expected, actual)
def test_infer_datetime_units(self):
for dates, expected in [(pd.date_range('1900-01-01', periods=5),
'days since 1900-01-01 00:00:00'),
(pd.date_range('1900-01-01 12:00:00', freq='H',
periods=2),
'hours since 1900-01-01 12:00:00'),
(['1900-01-01', '1900-01-02',
'1900-01-02 00:00:01'],
'seconds since 1900-01-01 00:00:00'),
(pd.to_datetime(['1900-01-01', '1900-01-02', 'NaT']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['1900-01-01',
'1900-01-02T00:00:00.005']),
'seconds since 1900-01-01 00:00:00')]:
self.assertEqual(expected, conventions.infer_datetime_units(dates))
def test_cf_timedelta(self):
examples = [
('1D', 'days', np.int64(1)),
(['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
('1h', 'hours', np.int64(1)),
('1ms', 'milliseconds', np.int64(1)),
('1us', 'microseconds', np.int64(1)),
(['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
(['30m', '60m'], 'hours', [0.5, 1.0]),
]
if pd.__version__ >= '0.16':
# not quite sure why, but these examples don't work on older pandas
examples.extend([(np.timedelta64('NaT', 'ns'), 'days', np.nan),
(['NaT', 'NaT'], 'days', [np.nan, np.nan])])
for timedeltas, units, numbers in examples:
timedeltas = pd.to_timedelta(timedeltas, box=False)
numbers = np.array(numbers)
expected = numbers
actual, _ = conventions.encode_cf_timedelta(timedeltas, units)
self.assertArrayEqual(expected, actual)
self.assertEqual(expected.dtype, actual.dtype)
if units is not None:
expected = timedeltas
actual = conventions.decode_cf_timedelta(numbers, units)
self.assertArrayEqual(expected, actual)
self.assertEqual(expected.dtype, actual.dtype)
expected = np.timedelta64('NaT', 'ns')
actual = conventions.decode_cf_timedelta(np.array(np.nan), 'days')
self.assertArrayEqual(expected, actual)
def test_infer_timedelta_units(self):
for deltas, expected in [
(pd.to_timedelta(['1 day', '2 days']), 'days'),
(pd.to_timedelta(['1h', '1 day 1 hour']), 'hours'),
(pd.to_timedelta(['1m', '2m', np.nan]), 'minutes'),
(pd.to_timedelta(['1m3s', '1m4s']), 'seconds')]:
self.assertEqual(expected, conventions.infer_timedelta_units(deltas))
def test_invalid_units_raises_eagerly(self):
ds = Dataset({'time': ('time', [0, 1], {'units': 'foobar since 123'})})
with self.assertRaisesRegexp(ValueError, 'unable to decode time'):
decode_cf(ds)
@requires_netCDF4
def test_dataset_repr_with_netcdf4_datetimes(self):
# regression test for #347
attrs = {'units': 'days since 0001-01-01', 'calendar': 'noleap'}
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'unable to decode time')
ds = decode_cf(Dataset({'time': ('time', [0, 1], attrs)}))
self.assertIn('(time) object', repr(ds))
attrs = {'units': 'days since 1900-01-01'}
ds = decode_cf(Dataset({'time': ('time', [0, 1], attrs)}))
self.assertIn('(time) datetime64[ns]', repr(ds))
class TestNativeEndiannessArray(TestCase):
def test(self):
x = np.arange(5, dtype='>i8')
expected = np.arange(5, dtype='int64')
a = conventions.NativeEndiannessArray(x)
assert a.dtype == expected.dtype
assert a.dtype == expected[:].dtype
self.assertArrayEqual(a, expected)
@requires_netCDF4
class TestEncodeCFVariable(TestCase):
def test_incompatible_attributes(self):
invalid_vars = [
Variable(['t'], pd.date_range('2000-01-01', periods=3),
{'units': 'foobar'}),
Variable(['t'], pd.to_timedelta(['1 day']), {'units': 'foobar'}),
Variable(['t'], [0, 1, 2], {'add_offset': 0}, {'add_offset': 2}),
Variable(['t'], [0, 1, 2], {'_FillValue': 0}, {'_FillValue': 2}),
]
for var in invalid_vars:
with self.assertRaises(ValueError):
conventions.encode_cf_variable(var)
@requires_netCDF4
class TestDecodeCF(TestCase):
def test_dataset(self):
original = Dataset({
't': ('t', [0, 1, 2], {'units': 'days since 2000-01-01'}),
'foo': ('t', [0, 0, 0], {'coordinates': 'y', 'units': 'bar'}),
'y': ('t', [5, 10, -999], {'_FillValue': -999})
})
expected = Dataset({'foo': ('t', [0, 0, 0], {'units': 'bar'})},
{'t': pd.date_range('2000-01-01', periods=3),
'y': ('t', [5.0, 10.0, np.nan])})
actual = conventions.decode_cf(original)
self.assertDatasetIdentical(expected, actual)
def test_invalid_coordinates(self):
# regression test for GH308
original = Dataset({'foo': ('t', [1, 2], {'coordinates': 'invalid'})})
actual = conventions.decode_cf(original)
self.assertDatasetIdentical(original, actual)
class CFEncodedInMemoryStore(InMemoryDataStore):
def store(self, variables, attributes):
variables, attributes = cf_encoder(variables, attributes)
InMemoryDataStore.store(self, variables, attributes)
class NullWrapper(utils.NDArrayMixin):
"""
Just for testing, this lets us create a numpy array directly
but make it look like it's not in memory yet.
"""
def __init__(self, array):
self.array = array
def __getitem__(self, key):
return self.array[indexing.orthogonal_indexer(key, self.shape)]
def null_wrap(ds):
"""
Given a data store this wraps each variable in a NullWrapper so that
it appears to be out of memory.
"""
variables = dict((k, Variable(v.dims, NullWrapper(v.values), v.attrs))
for k, v in iteritems(ds))
return InMemoryDataStore(variables=variables, attributes=ds.attrs)
@requires_netCDF4
class TestCFEncodedDataStore(CFEncodedDataTest, TestCase):
@contextlib.contextmanager
def create_store(self):
yield CFEncodedInMemoryStore()
@contextlib.contextmanager
def roundtrip(self, data, decode_cf=True):
store = CFEncodedInMemoryStore()
data.dump_to_store(store)
yield open_dataset(store, decode_cf=decode_cf)
def test_roundtrip_coordinates(self):
raise unittest.SkipTest('cannot roundtrip coordinates yet for '
'CFEncodedInMemoryStore')
| apache-2.0 |
sauliusl/seaborn | seaborn/regression.py | 3 | 37333 | """Plotting functions for linear models (broadly construed)."""
from __future__ import division
import copy
from textwrap import dedent
import warnings
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
import statsmodels
assert statsmodels
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .axisgrid import FacetGrid, _facet_docs
__all__ = ["lmplot", "regplot", "residplot"]
class _LinearPlotter(object):
"""Base class for plotting relational data in tidy format.
To get anything useful done you'll have to inherit from this, but setup
code that can be abstracted out should be put here.
"""
def establish_variables(self, data, **kws):
"""Extract variables from data or use directly."""
self.data = data
# Validate the inputs
any_strings = any([isinstance(v, string_types) for v in kws.values()])
if any_strings and data is None:
raise ValueError("Must pass `data` if using named variables.")
# Set the variables
for var, val in kws.items():
if isinstance(val, string_types):
setattr(self, var, data[val])
elif isinstance(val, list):
setattr(self, var, np.asarray(val))
else:
setattr(self, var, val)
def dropna(self, *vars):
"""Remove observations with missing data."""
vals = [getattr(self, var) for var in vars]
vals = [v for v in vals if v is not None]
not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
for var in vars:
val = getattr(self, var)
if val is not None:
setattr(self, var, val[not_na])
def plot(self, ax):
raise NotImplementedError
class _RegressionPlotter(_LinearPlotter):
"""Plotter for numeric independent variables with regression model.
This does the computations and drawing for the `regplot` function, and
is thus also used indirectly by `lmplot`.
"""
def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False,
robust=False, logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
color=None, label=None):
# Set member attributes
self.x_estimator = x_estimator
self.ci = ci
self.x_ci = ci if x_ci == "ci" else x_ci
self.n_boot = n_boot
self.scatter = scatter
self.fit_reg = fit_reg
self.order = order
self.logistic = logistic
self.lowess = lowess
self.robust = robust
self.logx = logx
self.truncate = truncate
self.x_jitter = x_jitter
self.y_jitter = y_jitter
self.color = color
self.label = label
# Validate the regression options:
if sum((order > 1, logistic, robust, lowess, logx)) > 1:
raise ValueError("Mutually exclusive regression options.")
# Extract the data vals from the arguments or passed dataframe
self.establish_variables(data, x=x, y=y, units=units,
x_partial=x_partial, y_partial=y_partial)
# Drop null observations
if dropna:
self.dropna("x", "y", "units", "x_partial", "y_partial")
# Regress nuisance variables out of the data
if self.x_partial is not None:
self.x = self.regress_out(self.x, self.x_partial)
if self.y_partial is not None:
self.y = self.regress_out(self.y, self.y_partial)
# Possibly bin the predictor variable, which implies a point estimate
if x_bins is not None:
self.x_estimator = np.mean if x_estimator is None else x_estimator
x_discrete, x_bins = self.bin_predictor(x_bins)
self.x_discrete = x_discrete
else:
self.x_discrete = self.x
# Save the range of the x variable for the grid later
self.x_range = self.x.min(), self.x.max()
@property
def scatter_data(self):
"""Data where each observation is a point."""
x_j = self.x_jitter
if x_j is None:
x = self.x
else:
x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
y_j = self.y_jitter
if y_j is None:
y = self.y
else:
y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
return x, y
@property
def estimate_data(self):
"""Data with a point estimate and CI for each discrete x value."""
x, y = self.x_discrete, self.y
vals = sorted(np.unique(x))
points, cis = [], []
for val in vals:
# Get the point estimate of the y variable
_y = y[x == val]
est = self.x_estimator(_y)
points.append(est)
# Compute the confidence interval for this estimate
if self.x_ci is None:
cis.append(None)
else:
units = None
if self.x_ci == "sd":
sd = np.std(_y)
_ci = est - sd, est + sd
else:
if self.units is not None:
units = self.units[x == val]
boots = algo.bootstrap(_y, func=self.x_estimator,
n_boot=self.n_boot, units=units)
_ci = utils.ci(boots, self.x_ci)
cis.append(_ci)
return vals, points, cis
def fit_regression(self, ax=None, x_range=None, grid=None):
"""Fit the regression model."""
# Create the grid for the regression
if grid is None:
if self.truncate:
x_min, x_max = self.x_range
else:
if ax is None:
x_min, x_max = x_range
else:
x_min, x_max = ax.get_xlim()
grid = np.linspace(x_min, x_max, 100)
ci = self.ci
# Fit the regression
if self.order > 1:
yhat, yhat_boots = self.fit_poly(grid, self.order)
elif self.logistic:
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
family=Binomial())
elif self.lowess:
ci = None
grid, yhat = self.fit_lowess()
elif self.robust:
from statsmodels.robust.robust_linear_model import RLM
yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
elif self.logx:
yhat, yhat_boots = self.fit_logx(grid)
else:
yhat, yhat_boots = self.fit_fast(grid)
# Compute the confidence interval at each grid point
if ci is None:
err_bands = None
else:
err_bands = utils.ci(yhat_boots, ci, axis=0)
return grid, yhat, err_bands
def fit_fast(self, grid):
"""Low-level regression and prediction using linear algebra."""
def reg_func(_x, _y):
return np.linalg.pinv(_x).dot(_y)
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def fit_poly(self, grid, order):
"""Regression using numpy polyfit for higher-order trends."""
def reg_func(_x, _y):
return np.polyval(np.polyfit(_x, _y, order), grid)
x, y = self.x, self.y
yhat = reg_func(x, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(x, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_statsmodels(self, grid, model, **kwargs):
"""More general regression function using statsmodels objects."""
import statsmodels.genmod.generalized_linear_model as glm
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
def reg_func(_x, _y):
try:
yhat = model(_y, _x, **kwargs).fit().predict(grid)
except glm.PerfectSeparationError:
yhat = np.empty(len(grid))
yhat.fill(np.nan)
return yhat
yhat = reg_func(X, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_lowess(self):
"""Fit a locally-weighted regression, which returns its own grid."""
from statsmodels.nonparametric.smoothers_lowess import lowess
grid, yhat = lowess(self.y, self.x).T
return grid, yhat
def fit_logx(self, grid):
"""Fit the model in log-space."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), np.log(grid)]
def reg_func(_x, _y):
_x = np.c_[_x[:, 0], np.log(_x[:, 1])]
return np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def bin_predictor(self, bins):
"""Discretize a predictor by assigning value to closest bin."""
x = self.x
if np.isscalar(bins):
percentiles = np.linspace(0, 100, bins + 2)[1:-1]
bins = np.c_[utils.percentiles(x, percentiles)]
else:
bins = np.c_[np.ravel(bins)]
dist = distance.cdist(np.c_[x], bins)
x_binned = bins[np.argmin(dist, axis=1)].ravel()
return x_binned, bins.ravel()
def regress_out(self, a, b):
"""Regress b from a keeping a's original mean."""
a_mean = a.mean()
a = a - a_mean
b = b - b.mean()
b = np.c_[b]
a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
return (a_prime + a_mean).reshape(a.shape)
def plot(self, ax, scatter_kws, line_kws):
"""Draw the full plot."""
# Insert the plot label into the correct set of keyword arguments
if self.scatter:
scatter_kws["label"] = self.label
else:
line_kws["label"] = self.label
# Use the current color cycle state as a default
if self.color is None:
lines, = plt.plot(self.x.mean(), self.y.mean())
color = lines.get_color()
lines.remove()
else:
color = self.color
# Ensure that color is hex to avoid matplotlib weirdness
color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))
# Let color in keyword arguments override overall plot color
scatter_kws.setdefault("color", color)
line_kws.setdefault("color", color)
# Draw the constituent plots
if self.scatter:
self.scatterplot(ax, scatter_kws)
if self.fit_reg:
self.lineplot(ax, line_kws)
# Label the axes
if hasattr(self.x, "name"):
ax.set_xlabel(self.x.name)
if hasattr(self.y, "name"):
ax.set_ylabel(self.y.name)
def scatterplot(self, ax, kws):
"""Draw the data."""
# Treat the line-based markers specially, explicitly setting larger
# linewidth than is provided by the seaborn style defaults.
# This would ideally be handled better in matplotlib (i.e., distinguish
# between edgewidth for solid glyphs and linewidth for line glyphs),
# but this should do for now.
line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
if self.x_estimator is None:
if "marker" in kws and kws["marker"] in line_markers:
lw = mpl.rcParams["lines.linewidth"]
else:
lw = mpl.rcParams["lines.markeredgewidth"]
kws.setdefault("linewidths", lw)
if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
kws.setdefault("alpha", .8)
x, y = self.scatter_data
ax.scatter(x, y, **kws)
else:
# TODO abstraction
ci_kws = {"color": kws["color"]}
ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
kws.setdefault("s", 50)
xs, ys, cis = self.estimate_data
if [ci for ci in cis if ci is not None]:
for x, ci in zip(xs, cis):
ax.plot([x, x], ci, **ci_kws)
ax.scatter(xs, ys, **kws)
def lineplot(self, ax, kws):
"""Draw the model."""
xlim = ax.get_xlim()
# Fit the regression model
grid, yhat, err_bands = self.fit_regression(ax)
# Get/set default aesthetics
fill_color = kws["color"]
lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
kws.setdefault("linewidth", lw)
# Draw the regression line and confidence interval
ax.plot(grid, yhat, **kws)
if err_bands is not None:
ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)
ax.set_xlim(*xlim, auto=None)
_regression_docs = dict(
model_api=dedent("""\
There are a number of mutually exclusive options for estimating the
regression model. See the :ref:`tutorial <regression_tutorial>` for more
information.\
"""),
regplot_vs_lmplot=dedent("""\
The :func:`regplot` and :func:`lmplot` functions are closely related, but
the former is an axes-level function while the latter is a figure-level
function that combines :func:`regplot` and :class:`FacetGrid`.\
"""),
x_estimator=dedent("""\
x_estimator : callable that maps vector -> scalar, optional
Apply this function to each unique value of ``x`` and plot the
resulting estimate. This is useful when ``x`` is a discrete variable.
If ``x_ci`` is given, this estimate will be bootstrapped and a
confidence interval will be drawn.\
"""),
x_bins=dedent("""\
x_bins : int or vector, optional
Bin the ``x`` variable into discrete bins and then estimate the central
tendency and a confidence interval. This binning only influences how
the scatterplot is drawn; the regression is still fit to the original
data. This parameter is interpreted either as the number of
evenly-sized (not necessarily spaced) bins or the positions of the bin
centers. When this parameter is used, it implies that the default of
``x_estimator`` is ``numpy.mean``.\
"""),
x_ci=dedent("""\
x_ci : "ci", "sd", int in [0, 100] or None, optional
Size of the confidence interval used when plotting a central tendency
for discrete values of ``x``. If ``"ci"``, defer to the value of the
``ci`` parameter. If ``"sd"``, skip bootstrapping and show the
standard deviation of the observations in each bin.\
"""),
scatter=dedent("""\
scatter : bool, optional
If ``True``, draw a scatterplot with the underlying observations (or
the ``x_estimator`` values).\
"""),
fit_reg=dedent("""\
fit_reg : bool, optional
If ``True``, estimate and plot a regression model relating the ``x``
and ``y`` variables.\
"""),
ci=dedent("""\
ci : int in [0, 100] or None, optional
Size of the confidence interval for the regression estimate. This will
be drawn using translucent bands around the regression line. The
confidence interval is estimated using a bootstrap; for large
datasets, it may be advisable to avoid that computation by setting
this parameter to None.\
"""),
n_boot=dedent("""\
n_boot : int, optional
Number of bootstrap resamples used to estimate the ``ci``. The default
value attempts to balance time and stability; you may want to increase
this value for "final" versions of plots.\
"""),
units=dedent("""\
units : variable name in ``data``, optional
If the ``x`` and ``y`` observations are nested within sampling units,
those can be specified here. This will be taken into account when
computing the confidence intervals by performing a multilevel bootstrap
that resamples both units and observations (within unit). This does not
otherwise influence how the regression is estimated or drawn.\
"""),
order=dedent("""\
order : int, optional
If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
polynomial regression.\
"""),
logistic=dedent("""\
logistic : bool, optional
If ``True``, assume that ``y`` is a binary variable and use
``statsmodels`` to estimate a logistic regression model. Note that this
is substantially more computationally intensive than linear regression,
so you may wish to decrease the number of bootstrap resamples
(``n_boot``) or set ``ci`` to None.\
"""),
lowess=dedent("""\
lowess : bool, optional
If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
model (locally weighted linear regression). Note that confidence
intervals cannot currently be drawn for this kind of model.\
"""),
robust=dedent("""\
robust : bool, optional
If ``True``, use ``statsmodels`` to estimate a robust regression. This
will de-weight outliers. Note that this is substantially more
computationally intensive than standard linear regression, so you may
wish to decrease the number of bootstrap resamples (``n_boot``) or set
``ci`` to None.\
"""),
logx=dedent("""\
logx : bool, optional
If ``True``, estimate a linear regression of the form y ~ log(x), but
plot the scatterplot and regression model in the input space. Note that
``x`` must be positive for this to work.\
"""),
xy_partial=dedent("""\
{x,y}_partial : strings in ``data`` or matrices
Confounding variables to regress out of the ``x`` or ``y`` variables
before plotting.\
"""),
truncate=dedent("""\
truncate : bool, optional
By default, the regression line is drawn to fill the x axis limits
after the scatterplot is drawn. If ``truncate`` is ``True``, it will
instead be bounded by the data limits.\
"""),
xy_jitter=dedent("""\
{x,y}_jitter : floats, optional
Add uniform random noise of this size to either the ``x`` or ``y``
variables. The noise is added to a copy of the data after fitting the
regression, and only influences the look of the scatterplot. This can
be helpful when plotting variables that take discrete values.\
"""),
scatter_line_kws=dedent("""\
{scatter,line}_kws : dictionaries
Additional keyword arguments to pass to ``plt.scatter`` and
``plt.plot``.\
"""),
)
_regression_docs.update(_facet_docs)
def lmplot(x, y, data, hue=None, col=None, row=None, palette=None,
col_wrap=None, height=5, aspect=1, markers="o", sharex=True,
sharey=True, hue_order=None, col_order=None, row_order=None,
legend=True, legend_out=True, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None, truncate=False,
x_jitter=None, y_jitter=None, scatter_kws=None, line_kws=None,
size=None):
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` paramter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Reduce the dataframe to only needed columns
need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
cols = np.unique([a for a in need_cols if a is not None]).tolist()
data = data[cols]
# Initialize the grid
facets = FacetGrid(data, row, col, hue, palette=palette,
row_order=row_order, col_order=col_order,
hue_order=hue_order, height=height, aspect=aspect,
col_wrap=col_wrap, sharex=sharex, sharey=sharey,
legend_out=legend_out)
# Add the markers here as FacetGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if facets.hue_names is None:
n_markers = 1
else:
n_markers = len(facets.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singeton or a list of markers "
"for each level of the hue variable"))
facets.hue_kws = {"marker": markers}
# Hack to set the x limits properly, which needs to happen here
# because the extent of the regression estimate is determined
# by the limits of the plot
if sharex:
for ax in facets.axes.flat:
ax.scatter(data[x], np.ones(len(data)) * data[y].mean()).remove()
# Draw the regression plot on each facet
regplot_kws = dict(
x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
order=order, logistic=logistic, lowess=lowess, robust=robust,
logx=logx, x_partial=x_partial, y_partial=y_partial, truncate=truncate,
x_jitter=x_jitter, y_jitter=y_jitter,
scatter_kws=scatter_kws, line_kws=line_kws,
)
facets.map_dataframe(regplot, x, y, **regplot_kws)
# Add a legend
if legend and (hue is not None) and (hue not in [col, row]):
facets.add_legend()
return facets
lmplot.__doc__ = dedent("""\
Plot data and regression model fits across a FacetGrid.
This function combines :func:`regplot` and :class:`FacetGrid`. It is
intended as a convenient interface to fit regression models across
conditional subsets of a dataset.
When thinking about how to assign variables to different facets, a general
rule is that it makes sense to use ``hue`` for the most important
comparison, followed by ``col`` and ``row``. However, always think about
your particular dataset and the goals of the visualization you are
creating.
{model_api}
The parameters to this function span most of the options in
:class:`FacetGrid`, although there may be occasional cases where you will
want to use that class and :func:`regplot` directly.
Parameters
----------
x, y : strings, optional
Input variables; these should be column names in ``data``.
{data}
hue, col, row : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to control
the order of levels of this variable.
{palette}
{col_wrap}
{height}
{aspect}
markers : matplotlib marker code or list of marker codes, optional
Markers for the scatterplot. If a list, each marker in the list will be
used for each level of the ``hue`` variable.
{share_xy}
{{hue,col,row}}_order : lists, optional
Order for the levels of the faceting variables. By default, this will
be the order that the levels appear in ``data`` or, if the variables
are pandas categoricals, the category order.
legend : bool, optional
If ``True`` and there is a ``hue`` variable, add a legend.
{legend_out}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
{scatter_line_kws}
See Also
--------
regplot : Plot data and a conditional model fit.
FacetGrid : Subplot grid for plotting conditional relationships.
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
Notes
-----
{regplot_vs_lmplot}
Examples
--------
These examples focus on basic regression model plots to exhibit the
various faceting options; see the :func:`regplot` docs for demonstrations
    of the other options for plotting the data and models. There are also
    other examples showing how to manipulate the plot using the returned
    object in the :class:`FacetGrid` docs.
Plot a simple linear relationship between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.lmplot(x="total_bill", y="tip", data=tips)
Condition on a third variable and plot the levels in different colors:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips)
    Use different markers as well as colors so the plot will reproduce well
    in black and white:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... markers=["o", "x"])
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette="Set1")
Map ``hue`` levels to colors with a dictionary:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette=dict(Yes="g", No="m"))
Plot the levels of the third variable across different columns:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="smoker", data=tips)
Change the height and aspect ratio of the facets:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="size", y="total_bill", hue="day", col="day",
... data=tips, height=6, aspect=.4, x_jitter=.1)
Wrap the levels of the column variable into multiple rows:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="day", hue="day",
... data=tips, col_wrap=2, height=3)
Condition on two variables to make a full grid:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, height=3)
Use methods on the returned :class:`FacetGrid` instance to further tweak
the plot:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, height=3)
>>> g = (g.set_axis_labels("Total bill (US Dollars)", "Tip")
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.02))
""").format(**_regression_docs)
def regplot(x, y, data=None, x_estimator=None, x_bins=None, x_ci="ci",
scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
label=None, color=None, marker="o",
scatter_kws=None, line_kws=None, ax=None):
plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
scatter, fit_reg, ci, n_boot, units,
order, logistic, lowess, robust, logx,
x_partial, y_partial, truncate, dropna,
x_jitter, y_jitter, color, label)
if ax is None:
ax = plt.gca()
scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
scatter_kws["marker"] = marker
line_kws = {} if line_kws is None else copy.copy(line_kws)
plotter.plot(ax, scatter_kws, line_kws)
return ax
regplot.__doc__ = dedent("""\
Plot data and a linear regression model fit.
{model_api}
Parameters
----------
x, y: string, series, or vector array
Input variables. If strings, these should correspond with column names
in ``data``. When pandas objects are used, axes will be labeled with
the series name.
{data}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
label : string
        Label to apply to either the scatterplot or regression line (if
``scatter`` is ``False``) for use in a legend.
color : matplotlib color
Color to apply to all plot elements; will be superseded by colors
passed in ``scatter_kws`` or ``line_kws``.
marker : matplotlib marker code
Marker to use for the scatterplot glyphs.
{scatter_line_kws}
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
Returns
-------
ax : matplotlib Axes
The Axes object containing the plot.
See Also
--------
lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
linear relationships in a dataset.
jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
``kind="reg"``).
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
residplot : Plot the residuals of a linear regression model.
Notes
-----
{regplot_vs_lmplot}
    It's also easy to combine :func:`regplot` and :class:`JointGrid` or
:class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
functions, although these do not directly accept all of :func:`regplot`'s
parameters.
Examples
--------
Plot the relationship between two variables in a DataFrame:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> ax = sns.regplot(x="total_bill", y="tip", data=tips)
Plot with two variables defined as numpy arrays; use a different color:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(8)
>>> mean, cov = [4, 6], [(1.5, .7), (.7, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 80).T
>>> ax = sns.regplot(x=x, y=y, color="g")
Plot with two variables defined as pandas Series; use a different marker:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x, y = pd.Series(x, name="x_var"), pd.Series(y, name="y_var")
>>> ax = sns.regplot(x=x, y=y, marker="+")
Use a 68% confidence interval, which corresponds with the standard error
of the estimate:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, ci=68)
Plot with a discrete ``x`` variable and add some jitter:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips, x_jitter=.1)
Plot with a discrete ``x`` variable showing means and confidence intervals
for unique values:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean)
Plot with a continuous variable divided into discrete bins:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, x_bins=4)
Fit a higher-order polynomial regression and truncate the model prediction:
.. plot::
:context: close-figs
>>> ans = sns.load_dataset("anscombe")
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "II"],
... scatter_kws={{"s": 80}},
... order=2, ci=None, truncate=True)
Fit a robust regression and don't plot a confidence interval:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "III"],
... scatter_kws={{"s": 80}},
... robust=True, ci=None)
Fit a logistic regression; jitter the y variable and use fewer bootstrap
iterations:
.. plot::
:context: close-figs
>>> tips["big_tip"] = (tips.tip / tips.total_bill) > .175
>>> ax = sns.regplot(x="total_bill", y="big_tip", data=tips,
... logistic=True, n_boot=500, y_jitter=.03)
Fit the regression model using log(x) and truncate the model prediction:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean, logx=True, truncate=True)
""").format(**_regression_docs)
def residplot(x, y, data=None, lowess=False, x_partial=None, y_partial=None,
order=1, robust=False, dropna=True, label=None, color=None,
scatter_kws=None, line_kws=None, ax=None):
"""Plot the residuals of a linear regression.
This function will regress y on x (possibly as a robust or polynomial
regression) and then draw a scatterplot of the residuals. You can
optionally fit a lowess smoother to the residual plot, which can
help in determining if there is structure to the residuals.
Parameters
----------
x : vector or string
Data or column name in `data` for the predictor variable.
y : vector or string
Data or column name in `data` for the response variable.
data : DataFrame, optional
DataFrame to use if `x` and `y` are column names.
lowess : boolean, optional
Fit a lowess smoother to the residual scatterplot.
    {x, y}_partial : matrix or string(s), optional
Matrix with same first dimension as `x`, or column name(s) in `data`.
These variables are treated as confounding and are removed from
the `x` or `y` variables before plotting.
order : int, optional
Order of the polynomial to fit when calculating the residuals.
robust : boolean, optional
Fit a robust linear regression when calculating the residuals.
dropna : boolean, optional
If True, ignore observations with missing data when fitting and
plotting.
label : string, optional
Label that will be used in any plot legends.
color : matplotlib color, optional
Color to use for all elements of the plot.
{scatter, line}_kws : dictionaries, optional
Additional keyword arguments passed to scatter() and plot() for drawing
the components of the plot.
ax : matplotlib axis, optional
Plot into this axis, otherwise grab the current axis or make a new
one if not existing.
Returns
-------
ax: matplotlib axes
Axes with the regression plot.
See Also
--------
regplot : Plot a simple linear regression model.
jointplot (with kind="resid"): Draw a residplot with univariate
                                   marginal distributions.
"""
plotter = _RegressionPlotter(x, y, data, ci=None,
order=order, robust=robust,
x_partial=x_partial, y_partial=y_partial,
dropna=dropna, color=color, label=label)
if ax is None:
ax = plt.gca()
# Calculate the residual from a linear regression
_, yhat, _ = plotter.fit_regression(grid=plotter.x)
plotter.y = plotter.y - yhat
# Set the regression option on the plotter
if lowess:
plotter.lowess = True
else:
plotter.fit_reg = False
# Plot a horizontal line at 0
ax.axhline(0, ls=":", c=".2")
# Draw the scatterplot
scatter_kws = {} if scatter_kws is None else scatter_kws
line_kws = {} if line_kws is None else line_kws
plotter.plot(ax, scatter_kws, line_kws)
return ax
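# --- Hedged usage sketch (editor's addition, not part of the original seaborn module). ---
# The DataFrame and column names below are illustrative assumptions; the function simply
# shows a typical residplot call with a lowess smoother over synthetic linear data.
def _residplot_usage_sketch():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    demo = pd.DataFrame({"x": rng.uniform(0, 10, 200)})
    demo["y"] = 1.5 * demo["x"] + rng.normal(0, 1, 200)
    # For a well-specified linear model the residuals should scatter around zero.
    return residplot(x="x", y="y", data=demo, lowess=True, color="g")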
| bsd-3-clause |
mlyundin/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
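# --- Hedged illustrative check (editor's addition, not part of the original test module). ---
# For a single pair of identical biclusters the consensus score reduces to the Jaccard
# similarity of their row/column indicator vectors, which is 1 here.
def test_consensus_score_single_identical_bicluster():
    rows = np.array([[True, True, False, False]])
    cols = np.array([[True, False, True, False]])
    expected = _jaccard(rows[0], cols[0], rows[0], cols[0])
    assert_almost_equal(consensus_score((rows, cols), (rows, cols)), expected)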
| bsd-3-clause |
plotly/python-api | packages/python/plotly/plotly/graph_objs/_area.py | 1 | 32343 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Area(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "area"
_valid_props = {
"customdata",
"customdatasrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"ids",
"idssrc",
"legendgroup",
"marker",
"meta",
"metasrc",
"name",
"opacity",
"r",
"rsrc",
"showlegend",
"stream",
"t",
"tsrc",
"type",
"uid",
"uirevision",
"visible",
}
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.area.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.area.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of
        strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.area.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Area traces are deprecated! Please switch to
the "barpolar" trace type. Sets themarkercolor.
It accepts either a specific color or an array
of numbers that are mapped to the colorscale
relative to the max and min values of the array
or relative to `marker.cmin` and `marker.cmax`
if set.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
opacity
Area traces are deprecated! Please switch to
the "barpolar" trace type. Sets the marker
opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
size
Area traces are deprecated! Please switch to
the "barpolar" trace type. Sets the marker size
(in px).
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
symbol
Area traces are deprecated! Please switch to
the "barpolar" trace type. Sets the marker
symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200
is equivalent to appending "-dot" to a symbol
name. Adding 300 is equivalent to appending
"-open-dot" or "dot-open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for symbol .
Returns
-------
plotly.graph_objs.area.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# r
# -
@property
def r(self):
"""
Area traces are deprecated! Please switch to the "barpolar"
trace type. Sets the radial coordinates for legacy polar chart
only.
The 'r' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["r"]
@r.setter
def r(self, val):
self["r"] = val
# rsrc
# ----
@property
def rsrc(self):
"""
Sets the source reference on Chart Studio Cloud for r .
The 'rsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["rsrc"]
@rsrc.setter
def rsrc(self, val):
self["rsrc"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.area.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.area.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# t
# -
@property
def t(self):
"""
Area traces are deprecated! Please switch to the "barpolar"
trace type. Sets the angular coordinates for legacy polar chart
only.
The 't' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["t"]
@t.setter
def t(self, val):
self["t"] = val
# tsrc
# ----
@property
def tsrc(self):
"""
Sets the source reference on Chart Studio Cloud for t .
The 'tsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tsrc"]
@tsrc.setter
def tsrc(self, val):
self["tsrc"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use it to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note that
            "scatter" traces also append customdata items to the
            markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.area.Hoverlabel` instance
or dict with compatible properties
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be
            an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
:class:`plotly.graph_objects.area.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
r
Area traces are deprecated! Please switch to the
"barpolar" trace type. Sets the radial coordinates for
legacy polar chart only.
rsrc
Sets the source reference on Chart Studio Cloud for r
.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.area.Stream` instance or
dict with compatible properties
t
Area traces are deprecated! Please switch to the
"barpolar" trace type. Sets the angular coordinates for
legacy polar chart only.
tsrc
Sets the source reference on Chart Studio Cloud for t
.
uid
            Assign an id to this trace. Use it to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
customdata=None,
customdatasrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
ids=None,
idssrc=None,
legendgroup=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
r=None,
rsrc=None,
showlegend=None,
stream=None,
t=None,
tsrc=None,
uid=None,
uirevision=None,
visible=None,
**kwargs
):
"""
Construct a new Area object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Area`
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note that
            "scatter" traces also append customdata items to the
            markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.area.Hoverlabel` instance
or dict with compatible properties
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be
            an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
:class:`plotly.graph_objects.area.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
r
Area traces are deprecated! Please switch to the
"barpolar" trace type. Sets the radial coordinates for
legacy polar chart only.
rsrc
Sets the source reference on Chart Studio Cloud for r
.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.area.Stream` instance or
dict with compatible properties
t
Area traces are deprecated! Please switch to the
"barpolar" trace type. Sets the angular coordinates for
legacy polar chart only.
tsrc
Sets the source reference on Chart Studio Cloud for t
.
uid
            Assign an id to this trace. Use it to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Area
"""
super(Area, self).__init__("area")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Area
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Area`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("r", None)
_v = r if r is not None else _v
if _v is not None:
self["r"] = _v
_v = arg.pop("rsrc", None)
_v = rsrc if rsrc is not None else _v
if _v is not None:
self["rsrc"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("t", None)
_v = t if t is not None else _v
if _v is not None:
self["t"] = _v
_v = arg.pop("tsrc", None)
_v = tsrc if tsrc is not None else _v
if _v is not None:
self["tsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "area"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
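# --- Hedged usage sketch (editor's addition, not part of the generated plotly source). ---
# Area traces are deprecated in favour of "barpolar"; this only illustrates that the
# constructor accepts either keyword arguments or an equivalent dict, and that both
# serialise to the same JSON-compatible representation.
def _area_usage_sketch():
    from_kwargs = Area(r=[1, 2, 3], t=[0, 45, 90], name="legacy polar area")
    from_dict = Area(dict(r=[1, 2, 3], t=[0, 45, 90], name="legacy polar area"))
    return from_kwargs.to_plotly_json() == from_dict.to_plotly_json()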
| mit |
openeemeter/eemeter | eemeter/visualization.py | 1 | 4111 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from .features import (
merge_features,
compute_usage_per_day_feature,
compute_temperature_features,
)
__all__ = ("plot_energy_signature", "plot_time_series")
def plot_time_series(meter_data, temperature_data, **kwargs):
"""Plot meter and temperature data in dual-axes time series.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
**kwargs
Arbitrary keyword arguments to pass to
:any:`plt.subplots <matplotlib.pyplot.subplots>`
Returns
-------
axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
Tuple of ``(ax_meter_data, ax_temperature_data)``.
"""
# TODO(philngo): include image in docs.
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
default_kwargs = {"figsize": (16, 4)}
default_kwargs.update(kwargs)
fig, ax1 = plt.subplots(**default_kwargs)
ax1.plot(
meter_data.index,
meter_data.value,
color="C0",
label="Energy Use",
drawstyle="steps-post",
)
ax1.set_ylabel("Energy Use")
ax2 = ax1.twinx()
ax2.plot(
temperature_data.index,
temperature_data,
color="C1",
label="Temperature",
alpha=0.8,
)
ax2.set_ylabel("Temperature")
fig.legend()
return ax1, ax2
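# --- Hedged usage sketch (editor's addition); the index, frequency and values below are
# illustrative assumptions rather than real meter data. ---
def _plot_time_series_usage_sketch():
    index = pd.date_range("2017-01-01", periods=48, freq="H", tz="UTC")
    meter_data = pd.DataFrame({"value": np.random.rand(48)}, index=index)
    temperature_data = pd.Series(50 + 20 * np.random.rand(48), index=index)
    return plot_time_series(meter_data, temperature_data, figsize=(12, 3))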
def plot_energy_signature(
meter_data,
temperature_data,
temp_col=None,
ax=None,
title=None,
figsize=None,
**kwargs
):
"""Plot meter and temperature data in energy signature.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
temp_col : :any:`str`, default ``'temperature_mean'``
The name of the temperature column.
ax : :any:`matplotlib.axes.Axes`
The axis on which to plot.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
**kwargs
Arbitrary keyword arguments to pass to
:any:`matplotlib.axes.Axes.scatter`.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
# format data
temperature_mean = compute_temperature_features(meter_data.index, temperature_data)
usage_per_day = compute_usage_per_day_feature(meter_data, series_name="meter_value")
df = merge_features([usage_per_day, temperature_mean.temperature_mean])
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if temp_col is None:
temp_col = "temperature_mean"
ax.scatter(df[temp_col], df.meter_value, **kwargs)
ax.set_xlabel("Temperature")
ax.set_ylabel("Energy Use per Day")
if title is not None:
ax.set_title(title)
return ax
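# --- Hedged usage note (editor's addition). The returned Axes is an ordinary matplotlib
# object, so it can be post-processed and saved; the keyword `s` is simply forwarded to
# `Axes.scatter` to control marker size. ---
def _save_energy_signature_sketch(meter_data, temperature_data, path="energy_signature.png"):
    ax = plot_energy_signature(meter_data, temperature_data, title="Energy signature", s=8)
    ax.figure.savefig(path, bbox_inches="tight")
    return ax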
| apache-2.0 |
oxpeter/small_fry | compare_clusters.py | 1 | 21145 | #!/usr/bin/env python
"""
module used to compare gene clusters between forg to stat and stat to forg
transitions.
"""
import os
import sys
import re
import argparse
import itertools
import string
import random
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.backends.backend_pdf import PdfPages
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
from genomepy import config
############################################
def define_arguments():
parser = argparse.ArgumentParser(description=
"Compares the gene membership of multiple lists within two directories")
### input options ###
# logging options:
parser.add_argument("-q", "--quiet", action='store_true',default=False,
help="print fewer messages and output details")
parser.add_argument("-o", "--output", type=str, default='genematch.out',
help="specify the filename to save results to")
parser.add_argument("-d", "--directory", type=str,
help="specify the directory to save results to")
# data file options:
parser.add_argument("input", metavar='<CLUSTER_DIR>', type=str, nargs='+',
help="directories containing cluster lists to compare")
parser.add_argument("-c", "--column", type=int, default=0,
help="column in which gene names are found (default = 0)")
parser.add_argument("-t", "--filetype", type=str, default='list|txt',
help="""specify the file filter to apply to each directory
(default = list|txt)""")
parser.add_argument("-f", "--filter", type=str, default=None,
help="""[FILE,COLUMN] Create a filter list from column COLUMN
in file FILE. Comparisons will then only look at genes in each
cluster that are also found in the file specified (useful for
looking at only significantly differentially expressed genes)""")
parser.add_argument("--second_degree", action='store_true',default=False,
help="analyse which clusters are separated by 2 degrees of separation")
parser.add_argument("--jidx", action='store_true',default=False,
help="""Use jaccard index for drawing network instead of item
number. If set, make sure the weights specified are values
between 0 and 1.""")
# viewing options:
parser.add_argument("--display_off", action='store_true',default=False,
help="Do not display plots (images will still be saved to pdf)")
parser.add_argument("-w", "--minweight", type=float, default=2,
help="""The minimum number of items to be shared by two clusters
for a solid blue line to be drawn between them in the network. If there
are less than this number, a blue dotted line will be drawn instead
(default = 2)""")
parser.add_argument("-W", "--maxweight", type=float, default=10,
help="""The minimum number of items to be shared by two clusters
for a thick solid black line to be drawn between them in the
network. If there are less than this number, a solid blue line will
be drawn instead (default = 10)""")
parser.add_argument("-n", "--network", type=int, default=0,
help="""Calculate and display a weighted network graph.
1: circular, 2: random, 3: spectral, 4: spring, 5: shell,
6:pygraphviz""")
parser.add_argument('-D', "--dimensions", type=int, default=2,
help="chose between 2 or 3 dimensions")
return parser
def jaccard(s0, s1):
"returns the Jaccard index of two sets"
denom = len((s0 | s1))
if denom == 0:
return -1.
else:
return 1. * len((s0 & s1)) / denom
def update_dic(dic, name, score, partner, jidx, reciprocal=True):
assert isinstance(score, int), "score is not an integer: %r" % score
assert isinstance(jidx, float), "jaccard idx is not a float: %r" % jidx
if name in dic:
if score > dic[name][1]:
dic[name] = (partner, score, jidx)
else:
dic[name] = (partner, score, jidx)
if reciprocal:
if partner in dic:
if score > dic[partner][1]:
dic[partner] = (name, score, jidx)
else:
dic[partner] = (name, score, jidx)
def filter_list(list, filterlist, filter=True):
if filter:
return [ i for i in list if i in filterlist ]
else:
return list
def clean_filename(filename):
fsearch = re.search("([FS]2[FS]).cluster_([0-9]+)",filename) # for the FS24 cluster names
csearch = re.search("cluster(\w+)",filename) # cluster search to find cluster id
gsearch = re.search("(\d+)", os.path.basename(filename)) # very generic search to pull a unique id
if fsearch:
return fsearch.group(1)[0] + fsearch.group(1)[-1] + ' ' + fsearch.group(2)
elif csearch:
return csearch.group(1)
elif gsearch:
return os.path.basename(filename)[0:5] + "_" + gsearch.group(1)
else:
return os.path.basename(filename)[0:5]
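# --- Hedged illustrative example (editor's addition); the filenames are made up to show
# which regex branch each naming style falls into. ---
def _clean_filename_examples():
    assert clean_filename("F2S.cluster_12.list") == "FS 12"            # FS24-style cluster name
    assert clean_filename("clusterA7.txt") == "A7"                     # generic cluster id
    assert clean_filename("/tmp/module_0042_genes.txt") == "modul_0042"  # fallback numeric id
    return True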
def collect_weights(file_lists, store_jidx=False):
"""
INPUT:
file_lists = { 0: [(name1,list1),(name2, list2)], 1:[(name3,list3),(name4, list4)] }
    store_jidx -> if False, the shared-item count is stored as the edge weight; if True, the Jaccard index is stored.
"""
# compare each set and save best matches to dictionary best_match
best_match = {}
network_weights = {} # to collect the number of items shared between a pair of clusters
cluster_sizes = {} # to collect the total number of items in each cluster.
if len(file_lists.keys()) > 1:
dir_iter = itertools.permutations(file_lists.keys(), 2)
elif len(file_lists.keys()) == 1:
dir_iter = ((0, 0),)
else:
verbalise("R", "No list supplied!! Exiting")
exit()
for dir_pair in dir_iter:
for cpair in itertools.product(file_lists[dir_pair[0]], file_lists[dir_pair[1]]):
s0n = cpair[0][0]
s1n = cpair[1][0]
if s0n == s1n:
continue
s0 = set(cpair[0][1])
s1 = set(cpair[1][1])
score = len(s0 & s1)
jidx = jaccard(s1, s0)
update_dic(best_match, s0n, score, s1n, jidx)
cluster_sizes[s0n] = len(s0)
cluster_sizes[s1n] = len(s1)
if store_jidx:
val = jidx
else:
val = score
if score != 0:
if s0n in network_weights:
network_weights[s0n][s1n] = val
else:
network_weights[s0n] = {s1n:val}
if s1n in network_weights:
network_weights[s1n][s0n] = val
else:
network_weights[s1n] = {s0n:val}
return best_match, network_weights, cluster_sizes
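# --- Hedged illustrative example (editor's addition); directory and gene names are made up. ---
# Input is {directory_index: [(cluster_name, gene_list), ...]}; the returned weights hold
# the number of shared genes per cluster pair (or the Jaccard index when store_jidx=True).
def _collect_weights_example():
    toy = {0: [("dirA/cluster1", ["g1", "g2", "g3"])],
           1: [("dirB/cluster9", ["g2", "g3", "g4"])]}
    best, weights, sizes = collect_weights(toy)
    assert weights["dirA/cluster1"]["dirB/cluster9"] == 2
    assert sizes["dirA/cluster1"] == 3
    return best, weights, sizes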
def draw_network(network_weights, maxweight=20, minweight=10, dims=2, report=None, display=True):
"""
Given a dictionary of weights between nodes, draws the network structure.
input:
node1[node2] = weight
"""
df = pd.DataFrame(network_weights).fillna(0)
cluster_array = df.values
dt = [('len', float)]
cluster_array = cluster_array.view(dt)
# create networkx object:
G = nx.from_numpy_matrix(cluster_array)
relabel = dict(zip(range(len(G.nodes())),[ clean_filename(f) for f in df.columns]))
if dims == 2:
G = nx.relabel_nodes(G, relabel)
# create dictionary to convert names back to positional labels:
#backlabels = dict(zip([ clean_filename(f) for f in df.columns], range(len(G.nodes()))))
#print backlabels.items()[:5]
# add weights to each edge for later processing:
e = [ (n1, n2, G[n1][n2]['len']) for n1 in G for n2 in G[n1] ]
G.add_weighted_edges_from(e)
# define the type of graph to be drawn:
network_types = {1: nx.circular_layout,
2: nx.random_layout,
3: nx.spectral_layout,
4: nx.spring_layout,
5: nx.shell_layout,
6: nx.pygraphviz_layout}
net_type = network_types[args.network]
# check for help in parsing the network objects:
if 'S2F99' in G:
verbalise( "G['S2F99'] --> %s" % (G['S2F99']))
# split into all sub-networks and draw each one:
if dims == 3:
from mayavi import mlab
pos=net_type(G, dim=dims, k=0.15)
else:
pos=net_type(G)
C = nx.connected_component_subgraphs(G)
# initialise pdf for saving all plots:
if report:
pp = PdfPages( report[:-3] + 'pdf' )
for g in C:
# report size of sub-network
verbalise("Y", "%d clusters in sub-network" % (len(g)) )
# define which edges are drawn bold:
rlarge = [(u,v,d) for (u,v,d) in g.edges(data=True) if d['weight'] >= maxweight]
rmedium =[(u,v,d) for (u,v,d) in g.edges(data=True) if maxweight > d['weight'] >= minweight]
rsmall = [(u,v,d) for (u,v,d) in g.edges(data=True) if d['weight'] < minweight]
elarge = [ (u,v) for (u,v,d) in rlarge ]
emedium = [ (u,v) for (u,v,d) in rmedium ]
esmall = [ (u,v) for (u,v,d) in rsmall ]
rlarge.sort(key=lambda x: x[2]['weight'])
rmedium.sort(key=lambda x: x[2]['weight'])
rsmall.sort(key=lambda x: x[2]['weight'])
# report number of clusters with each weight
verbalise("M", "%d cluster pairs with %d or more shared members" % (len(elarge),maxweight))
verbalise("G", "\n".join([ "%-6r %-6r %s" % (t[0], t[1], t[2]['weight']) for t in rlarge]))
verbalise("M", "%d cluster pairs with less than %d and %d or more shared members" % (len(emedium),maxweight, minweight))
verbalise("G", "\n".join(["%-6r %-6r %s" % (t[0], t[1], t[2]['weight']) for t in rmedium][-3:]))
verbalise("M", "%d cluster pairs with less than %d shared members" % (len(esmall),minweight))
verbalise("G", "\n".join(["%-6r %-6r %s" % (t[0], t[1], t[2]['weight']) for t in rsmall][-3:]))
verbalise("G","")
if report:
handle = open(report, 'a')
handle.write("%d clusters in sub-network\n" % (len(g)))
handle.write("%d cluster pairs with %d or more shared members\n" % (len(elarge),maxweight))
handle.write("%d cluster pairs with less than %d and %d or more shared members\n" % (len(emedium),maxweight, minweight))
handle.write("%d cluster pairs with less than %d shared members\n" % (len(esmall),minweight))
handle.write("\n".join(["%-6r %-6r %s" % (t[0], t[1], t[2]['weight']) for t in rlarge]) + '\n')
handle.write("\n".join(["%-6r %-6r %s" % (t[0], t[1], t[2]['weight']) for t in rmedium]) + '\n')
handle.write("\n".join(["%-6r %-6r %s" % (t[0], t[1], t[2]['weight']) for t in rsmall]) + '\n\n')
handle.close()
if dims == 2:
# draw edges (partitioned by each edge type):
# large:
nx.draw_networkx_edges(g,pos,edgelist=elarge,
width=2, edge_color='purple')
# medium:
nx.draw_networkx_edges(g,pos,edgelist=emedium,
width=1, alpha=0.6, edge_color='blue')
# small:
nx.draw_networkx_edges(g,pos,edgelist=esmall,
width=1, alpha=0.3, edge_color='blue', style='dashed')
# draw sub-network:
nx.draw(g,
pos,
node_size=40,
node_color=[ {'F':'g', 'S':'b', '_':'r'}[n[0]] for n in g ],
vmin=0.0,
vmax=2.0,
width=0,
with_labels=True
)
if report:
plt.savefig(pp, format='pdf')
if display:
plt.show()
else:
plt.close()
if dims == 3:
xyz=np.array([pos[n] for n in g ])
mlab.figure(1, bgcolor=(0, 0, 0))
mlab.clf()
for shape, edges, colour in [('sphere', elarge, (0.5,0.9,0.2)),
('sphere', emedium, (0.2,0.5,0.9)),
('sphere', esmall, (0.9,0.2,0.5)),
]:
pts = mlab.points3d(xyz[:,0], xyz[:,1], xyz[:,2],
[ {'F':(4), 'S':(6), '_':(8)}[relabel[n][0]] for n in g ],
colormap = 'copper',
scale_factor=0.1,
scale_mode='none',
resolution=20,
mode=shape,
vmax=10,
vmin=0)
edge_array = np.array([ list(t) for t in edges ])
pts.mlab_source.dataset.lines = edge_array
tube = mlab.pipeline.tube(pts, tube_radius=0.01)
mlab.pipeline.surface(tube, color=colour, vmin=0, vmax=10)
if display:
mlab.show()
if report:
pp.close()
return rlarge, rmedium, rsmall
def plot_heatmap(weights, outfile=None, samplesx=None, samplesy=None):
"""
INPUT:
weights -> a dictionary of values for each XY pair: weights[X][Y] = w
outfile -> file to save heatmap image to
samplex -> list of sample names for X axis. If None, all samples will be used.
sampley -> list of sample names for Y axis. If None, all samples will be used
"""
df = pd.DataFrame( weights )
if not samplesx:
samplesx = samplesy
elif not samplesy:
samplesy = samplesx
if samplesx:
selectx = [ name for name in samplesx if name in df.columns ]
selecty = [ name for name in samplesy if name in list(df.index.values) ]
namesx = [ clean_filename(name) for name in samplesx if name in df.columns ]
namesy = [ clean_filename(name) for name in samplesy if name in list(df.index.values) ]
df = df[selectx].loc[selecty]
X = np.nan_to_num(df.values)
else:
X = np.nan_to_num(df.values)
namesx = [ clean_filename(name) for name in df.columns ]
namesy = [ clean_filename(name) for name in df.columns ]
#verbalise("B", df.columns)
#verbalise("C", namesx)
#verbalise("Y", namesy)
# plot top dendrogram
fig = plt.figure(figsize=(8, 8))
axx = fig.add_axes([0.22,0.76,0.6,0.2], frame_on=True)
dx = dist.pdist(X.T)
Dx = dist.squareform(dx)
Yx = sch.linkage(Dx, method='complete', metric='euclidean')
#np.clip(Yx[:,2], 0, 100000, Yx[:,2]) # prevents errors from negative floating point near zero
Zx = sch.dendrogram(Yx)
# plot left dendrogram
axy = fig.add_axes([0.01,0.15,0.2,0.6], frame_on=True)
dy = dist.pdist(X)
Dy = dist.squareform(dy) # full matrix
Yy = sch.linkage(Dy, method='complete', metric='euclidean')
Zy = sch.dendrogram(Yy, orientation='right')
# remove ticks from dendrograms
axx.set_xticks([])
axx.set_yticks([])
axy.set_xticks([])
axy.set_yticks([])
# reorder matrices
indexx = Zx['leaves']
indexy = Zy['leaves']
Dy = Dy[indexy,:]
Dy = Dy[:,indexx]
X = X[indexy,:]
X = X[:,indexx]
newnamesx = [ namesx[i] for i in indexx ]
newnamesy = [ namesy[i] for i in indexy ]
# display distance matrix
axmatrix = fig.add_axes([0.22,0.15,0.6,0.6])
im = axmatrix.matshow(X, aspect='auto', origin='lower')
# set position and naming of axes
axmatrix.set_xticks(range(len(newnamesx)))
axmatrix.set_yticks(range(len(newnamesy)))
axmatrix.set_xticklabels(newnamesx, rotation=90)
axmatrix.set_yticklabels(newnamesy)
axmatrix.xaxis.tick_bottom()
axmatrix.yaxis.tick_right()
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
if outfile:
plt.savefig(outfile)
plt.show()
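# --- Hedged usage sketch (editor's addition); the weights dictionary is a toy example of
# the weights[X][Y] = w structure produced by collect_weights. ---
def _plot_heatmap_sketch(outfile="toy_heatmap.pdf"):
    toy_weights = {"dirA/cluster1": {"dirB/cluster7": 5, "dirB/cluster9": 2},
                   "dirB/cluster7": {"dirA/cluster1": 5},
                   "dirB/cluster9": {"dirA/cluster1": 2}}
    plot_heatmap(toy_weights, outfile=outfile)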
if __name__ == '__main__':
parser = define_arguments()
args = parser.parse_args()
verbalise = config.check_verbose(not(args.quiet))
logfile = config.create_log(args, outdir=args.directory, outname=args.output)
outfile = logfile[:-3] + "out"
# check that weights and scoring method are compatible (and meaningful):
if args.jidx and ( args.minweight > 1 or args.maxweight > 1 ):
verbalise("R", """You have selected the jaccard index for measuring distance
between samples, but have selected values greater than 1 for your minimum and
maximum weights to display in the network graph.""")
exit()
if args.filter:
filterset = config.make_a_list(args.filter.split(',')[0], int(args.filter.split(',')[1]))
else:
filterset = []
# collect lists of genes to compare:
file_lists = {}
verbalise("B", "Collecting lists of files using %s filter" % args.filetype)
for i, dir in enumerate(args.input):
file_lists[i] = [ (os.path.join(dir,f),
filter_list(config.make_a_list(os.path.join(dir,f),args.column),
filterset,
args.filter)) for f in os.listdir(dir) if re.search(args.filetype, f) ]
verbalise("Y", "%d files found for dir %s" % (len(file_lists[i]), dir))
# compare each set and save best matches to dictionary best_match
best_match, network_weights, cluster_sizes = collect_weights(file_lists, store_jidx=args.jidx)
# write best matches to file:
handle = open( outfile, 'w')
for file in best_match:
handle.write( "%s %s %d %.2f\n" % (os.path.basename(file),
os.path.basename(best_match[file][0]),
best_match[file][1], best_match[file][2]))
handle.close()
verbalise("Y", "FILE1\tFILE2\t# matches\tJaccard Index")
os.system("sort -k 4,4n " + outfile)
if args.network:
edge_weights = draw_network(network_weights,
minweight=args.minweight,
maxweight=args.maxweight,
dims=args.dimensions,
report=outfile,
display=(not args.display_off),
)
if not args.display_off:
# determine the maximum bin size to fix both histograms to same x-axis range:
max_x = max(cluster_sizes.values())
# calculate histograms (separate y-axes)
fig, ax1 = plt.subplots()
ax1.hist([ network_weights[p1][p2] for p1 in network_weights for p2 in network_weights[p1]],
bins=26,
color='red',
alpha=0.6,
range=[0,max_x])
ax1.set_xlabel('number of genes shared/\nnumber of genes in clusters', color='black')
ax1.set_ylabel('number of edges', color='r')
ax2 = ax1.twinx()
ax2.hist( cluster_sizes.values(),
bins = 26,
color='green',
alpha=0.6,
range=[0,max_x])
ax2.set_ylabel('number of clusters', color='g')
plt.show()
else:
plt.close()
# get names of samples from each list of clusters:
x_list = [ l[0] for l in file_lists[1] if l[0] in network_weights]
y_list = [ l[0] for l in file_lists[0] if l[0] in network_weights]
plot_heatmap(network_weights,
logfile[:-3] + 'heatmap.pdf',
x_list,
y_list)
if args.second_degree:
# calculate closest clusters from same folder (based on 2 degrees of separation):
independent_sets = []
for n1 in network_weights:
independent_sets.append((n1,
[ n2 for n2 in network_weights[n1] if network_weights[n1][n2] >0 and n1 != n2 ]))
comp_dic = {0:independent_sets}
d2best_match, d2network_weights, d2cluster_sizes = collect_weights(comp_dic, store_jidx=args.jidx)
if args.network:
edge_weights = draw_network(d2network_weights,
minweight=args.minweight,
maxweight=args.maxweight,
dims=args.dimensions,
report=logfile[:-3] + "2nd_degree.out",
display=(not args.display_off),
)
plot_heatmap(d2network_weights,
logfile[:-3] + 'heatmap1.pdf',
x_list)
plot_heatmap(d2network_weights,
logfile[:-3] + 'heatmap2.pdf',
y_list)
| gpl-2.0 |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_10_30_2014_server_4.py | 1 | 40790 |
# coding: utf-8
# In[3]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import pandas as pd
import pdb
import pickle
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[4]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
settings = {}
settings['filename'] = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
settings['fisher_mode'] = 'FisherM1'
settings['predicted_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['DL'] = 1
settings['SAE_SVM'] = 0
settings['SVM_RBF'] = 0
settings['DL_S'] = 1
settings['DL_U'] = 0
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 5004
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 1504
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0,0]
filename = settings['filename']
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[5]:
ddis
# In[28]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
""" get total number of sequences in a ddi familgy
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
LOO_data['FisherM1'][1]
"""
self.ddi = ddi
self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
self.total_number_of_sequences = self.get_total_number_of_sequences()
self.raw_data = {}
self.positve_negative_number = {}
self.equal_size_data = {}
for seq_no in range(1, self.total_number_of_sequences+1):
self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
try:
#positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(positive_file)
#lines = file_obj.readStripLines()
#import pdb; pdb.set_trace()
count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
count_neg = self.raw_data[seq_no].shape[0] - count_pos
#self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
#assert int(float(lines[0])) == count_pos
self.positve_negative_number[seq_no] = {'numPos': count_pos}
#negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(negative_file)
#lines = file_obj.readStripLines()
#self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
self.positve_negative_number[seq_no]['numNeg'] = count_neg
except Exception,e:
print ddi, seq_no
print str(e)
logger.info(ddi + str(seq_no))
logger.info(str(e))
# get data for equal positive and negative
n_pos = self.positve_negative_number[seq_no]['numPos']
n_neg = self.positve_negative_number[seq_no]['numNeg']
index_neg = range(n_pos, n_pos + n_neg)
random.shuffle(index_neg)
index_neg = index_neg[: n_pos]
positive_examples = self.raw_data[seq_no][ : n_pos, :]
negative_examples = self.raw_data[seq_no][index_neg, :]
self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
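# Illustrative usage sketch for this loader (the family name below is hypothetical and
# not taken from the input list; it only shows how the class is meant to be driven):
#   family = DDI_family_base('PF00001_int_PF00002')
#   family.total_number_of_sequences      # number of sequence files found for the family
#   family.equal_size_data[1]             # balanced (equal positive/negative) matrix for sequence 1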
def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get the leave one out traing data, reduced traing
Parameters:
seq_no:
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
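# Usage sketch (illustrative values only; `family` is a hypothetical DDI_family_base
# instance): leave sequence 3 out for testing and keep ~1/reduce_ratio of the rest:
#   (X_loo, y_loo), (X_red, y_red), (X_te, y_te) = \
#       family.get_LOO_training_and_reduced_traing(3, fisher_mode='FisherM1', reduce_ratio=4)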
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get traing data, reduced traing data for 10-fold crossvalidation
Parameters:
start_subset: index of start of the testing data
end_subset: index of end of the testing data
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
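# Usage sketch (illustrative; `family` is a hypothetical DDI_family_base instance).
# The index arrays come from sklearn's KFold exactly as in
# get_ten_fold_crossvalid_perfermance below:
#   kf = KFold(family.total_number_of_sequences, n_folds=10, shuffle=True)
#   train_idx, test_idx = iter(kf).next()
#   (X_10f, y_10f), (X_red, y_red), (X_te, y_te) = \
#       family.get_ten_fold_crossvalid_one_subset(train_idx, test_idx, fisher_mode='FisherM1', reduce_ratio=1)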
def get_total_number_of_sequences(self):
""" get total number of sequences in a ddi familgy
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename)
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename)
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
fisher_mode: subset base on this Fisher of AAONLY...
Returns:
selected X, y
"""
y = data[:,-1] # get label
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
raise ValueError('unknown fisher_mode: %s' % fisher_mode)
return X, y
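# Summary of the slicing above, as implied by the index ranges: the feature matrix
# appears to hold two blocks of 227 columns (one per interacting domain); within each
# block, columns 20-39 / 247-266 are the Fisher-score features and 40-226 / 267-453
# the AA-index features, with the 0/1 label in the last column. Columns 0-19 and
# 227-246 are not used by any mode.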
# In[28]:
# In[29]:
import sklearn.preprocessing
def performance_score(target_label, predicted_label, predicted_score = False, print_report = True):
""" get performance matrix for prediction
Attributes:
target_label: int 0, 1
predicted_label: 0, 1 or ranking
predicted_score: bool if False, predicted_label is from 0, 1. If Ture, predicted_label is ranked, need to get AUC score.
print_report: if True, print the perfromannce on screen
"""
import sklearn
from sklearn.metrics import roc_auc_score
score = {}
if predicted_score == False:
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if predicted_score == True:
auc_score = roc_auc_score(target_label, predicted_label)
score['auc_score'] = auc_score
predicted_label = [x >= 0.5 for x in predicted_label]  # binarize the predicted scores (not the targets) before the label-based metrics
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if print_report == True:
for key, value in score.iteritems():
print key, '{percent:.1%}'.format(percent=value)
return score
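# Example calls (hypothetical values) illustrating the two modes handled above:
#   performance_score([1, 0, 1, 0], [1, 0, 0, 0], print_report=False)
#       -> accuracy / precision / recall from hard 0/1 predictions
#   performance_score([1, 0, 1, 0], [0.9, 0.2, 0.6, 0.1], predicted_score=True, print_report=False)
#       -> additionally reports 'auc_score' computed from the ranking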
def saveAsCsv(predicted_score, fname, score_dict, *arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if predicted_score == False:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerows(arg)
csvfile.close()
def LOO_out_performance_for_all(ddis):
for ddi in ddis:
try:
one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
one_ddi_family.get_LOO_perfermance(settings = settings)
except Exception,e:
print str(e)
logger.info("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
class LOO_out_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_LOO_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
predicted_score = settings['predicted_score']
reduce_ratio = settings['reduce_ratio']
for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
print seq_no
logger.info('sequence number: ' + str(seq_no))
if settings['SVM']:
print "SVM"
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no,fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# Deep learning part
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
# direct deep learning
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if 0:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
if settings['DL_S']:
# deep learning using split network
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(y_test, test_predicted, predicted_score), analysis_scr)
# In[29]:
# In[30]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
for ddi in ddis:
try:
process_one_ddi_tenfold(ddi)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
def process_one_ddi_tenfold(ddi):
"""A function to waste CPU cycles"""
logger.info('DDI: %s' % ddi)
one_ddi_family = {}
one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
one_ddi_family[ddi].get_ten_fold_crossvalid_perfermance(settings=settings)
return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_ten_fold_crossvalid_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
predicted_score = settings['predicted_score']
reduce_ratio = settings['reduce_ratio']
#for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
#subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
#for subset_no in range(1, 11):
for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
#for train_index, test_index in kf;
print("Subset:", subset_no)
print("Train index: ", train_index)
print("Test index: ", test_index)
#logger.info('subset number: ' + str(subset_no))
if settings['SVM']:
print "SVM"
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
x = X_train_pre_validation_minmax
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_A, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_A)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_A)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['DL_U']:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
if settings['DL_S']:
# deep learning using split network
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(y_test, test_predicted, predicted_score), analysis_scr)
# In[1]:
ten_fold_crossvalid_performance_for_all(ddis[:])
# In[ ]:
#LOO_out_performance_for_all(ddis)
# In[25]:
# flush, close and detach the handlers attached to this script's logger
for handler_to_close in list(logger.handlers):
    logger.removeHandler(handler_to_close)
    handler_to_close.flush()
    handler_to_close.close()
# In[ ]:
| gpl-2.0 |
mayblue9/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
maciekcc/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
zasdfgbnm/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
score_range = score_range or [0, 1.]  # fall back to the unit interval when no range is given
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
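  # Worked example of the inversion: for desired_auc = 0.75 the cut point is
  # x = 2 * 0.75 - 1 = 0.5, so F ~ U[0, 1] and T ~ U[0.5, 1], giving
  # P[T > F] = 1 * 0.5 + 0.5 * (1 - 0.5) = 0.75, as required.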
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
remenska/iSDM | tests/test_GBIF.py | 1 | 3363 | import unittest
from iSDM.species import GBIFSpecies
import pandas as pd
import geopandas as gp
from shapely.geometry import Point
import numpy as np
class TestGBIF(unittest.TestCase):
def setUp(self):
self.test_species = GBIFSpecies(name_species="Pseudecheneis crassicauda")
self.test_species1 = GBIFSpecies(name_species="Some_nonsense")
self.test_species2 = GBIFSpecies(name_species="Etheostoma blennioides")
def test_GBIF_find_species_occurrences(self):
self.test_species.find_species_occurrences()
self.assertEqual(self.test_species.ID, 2341467)
self.assertEqual(self.test_species.name_species, "Pseudecheneis crassicauda")
self.assertIsInstance(self.test_species.data_full, pd.DataFrame)
# with self.assertRaises(ValueError):
# test_species1.find_species_occurrences()
data_empty = self.test_species1.find_species_occurrences()
self.assertIsInstance(data_empty, pd.DataFrame)
self.assertEqual(data_empty.empty, True)
def test_GBIF_load_csv(self):
self.test_species2.load_csv("./data/GBIF.csv")
self.assertEqual(self.test_species2.ID, 2382397)
self.assertIsInstance(self.test_species2.data_full, pd.DataFrame)
self.assertIsNotNone(self.test_species2.data_full)
def test_GBIF_geometrize(self):
self.test_species.find_species_occurrences()
self.test_species.geometrize()
self.assertIsInstance(self.test_species.data_full, gp.GeoDataFrame)
self.assertIsNotNone(self.test_species.data_full.geometry)
self.assertIsInstance(self.test_species.data_full.geometry, gp.geoseries.GeoSeries)
self.assertIsInstance(self.test_species.data_full.geometry.iat[0], Point)
self.assertIsNotNone(self.test_species.data_full.crs)
self.test_species.find_species_occurrences()
number_point_records = self.test_species.data_full.shape[0]
self.test_species.geometrize(dropna=False)
self.assertEqual(number_point_records, self.test_species.data_full.shape[0])
def test_GBIF_rasterize(self):
# with self.assertRaises(AttributeError):
# self.test_species.rasterize()
self.test_species2.load_csv("./data/GBIF.csv")
# self.test_species2.find_species_occurrences()
pixel_size = 1 # 0.008333333 # = 0.5/60
number_point_records = self.test_species2.data_full.shape[0]
# self.test_species.load_shapefile("./data/fish/selection/acrocheilus_alutaceus/acrocheilus_alutaceus.shp")
result = self.test_species2.rasterize(pixel_size=pixel_size, raster_file="./data/fish/tmp.tif", all_touched=True)
transform = self.test_species2.raster_affine
self.assertEqual(result.shape, (int(np.abs(transform.yoff) * (2 / pixel_size)), int(np.abs(transform.xoff) * (2 / pixel_size))))
self.assertIsInstance(result, np.ndarray)
self.assertEqual(set(np.unique(result)), {0, 1})
self.assertGreater(number_point_records, np.sum(result))
result1 = self.test_species2.rasterize(pixel_size=0.5, no_data_value=55, default_value=11)
self.assertEqual(set(np.unique(result1)), {55, 11})
def tearDown(self):
del self.test_species
del self.test_species1
del self.test_species2
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
OSCAAR/OSCAAR | oscaar/IO.py | 2 | 6274 | """
OSCAAR v2.0
Module for differential photometry
Developed by Brett Morris, 2011-2013
"""
from glob import glob
from re import split
import cPickle
from shutil import copy
import os
from matplotlib import pyplot as plt
def cd(a=None):
"""
Change to a different directory than the current one.
Parameters
----------
a : string
Location of the directory to change to.
Notes
-----
If `a` is None, this function changes to the parent directory.
"""
if a is None:
os.chdir(os.pardir)
else:
os.chdir(str(a))
def cp(a, b):
"""
Copy a file to another location.
Parameters
----------
a : string
Path of the file to be copied.
b : string
Location where the file will be copied to.
"""
copy(str(a), str(b))
def parseRegionsFile(regsPath):
"""
Parse a regions file for a set of data.
Parameters
----------
regsPath : string
Location of the regions file to be parsed.
Returns
-------
init_x_list : array
An array containing the x-values of the parsed file.
init_y_list : array
An array containing the y-values of the parsed file.
"""
regionsData = open(regsPath, 'r').read().splitlines()
init_x_list = []
init_y_list = []
for i in range(0, len(regionsData)):
if regionsData[i][0:6] == 'circle':
y, x = split(r"\,", split(r"\(", regionsData[i])[1])[0:2]
init_y_list.append(float(y))
init_x_list.append(float(x))
return init_x_list, init_y_list
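# Example (hypothetical ds9-style region line, for illustration only): a line such as
# "circle(512.0,488.5,10)" contributes y=512.0 to init_y_list and x=488.5 to
# init_x_list, i.e. the first value inside the parentheses is treated as y.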
def save(data, outputPath):
"""
Save everything in oscaar.dataBank object <data> to a python pickle
using cPickle.
Parameters
----------
data : oscaar.dataBank
The oscaar.dataBank() object to be pickled.
outputPath : string
Path to which the numpy-pickle will be saved.
"""
# Over-write check
if len(glob(outputPath)) > 0 \
or len(glob(outputPath+os.sep+'oscaarDataBase.pkl')) > 0 \
or len(glob(outputPath+'.pkl')) > 0:
print 'WARNING: could potentially overwrite the most recent oscaarDataBase.pkl'
if outputPath.endswith('.pkl') or outputPath.endswith('.PKL'):
outputName = outputPath
elif outputPath[-1] == os.sep:
outputName = outputPath+'oscaarDataBase.pkl'
else:
outputName = outputPath+'.pkl'
# cPickle can not save functions, so delete the function data.convertToJD
# before saving the object data
try:
del data.convertToJD
except:
pass
output = open(outputName, 'wb')
cPickle.dump(data, output)
output.close()
def load(inputPath):
"""
Load everything from a oscaar.dataBank() object in a python pickle
using cPickle.
Parameters
----------
inputPath : string
File location of an oscaar.dataBank() object to save into a pickle.
Returns
-------
data : oscaar.dataBank
The oscaar.dataBank() object loaded from the pickle.
"""
inputFile = open(inputPath, 'rb')
data = cPickle.load(inputFile)
inputFile.close()
return data
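# Illustrative round-trip sketch (hypothetical paths, not part of the original module):
# save(dataBank, 'outputs/oscaarDataBase') # writes outputs/oscaarDataBase.pkl
# dataBank = load('outputs/oscaarDataBase.pkl')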
def plottingSettings(trackPlots, photPlots, statusBar=True):
"""
**Description :** Function for handling matplotlib figures
across OSCAAR methods.
Parameters
----------
trackPlots : bool
Used to turn the astrometry plots on and off.
photPlots : bool
Used to turn the aperture photometry plots on and off.
statusBar : bool, optional
Used to turn the status bar on and off.
Returns
-------
[fig, subplotsDimensions, photSubplotsOffset] : [figure, int, int]
A list of three items. The first is the matplotlib figure object
that will be displayed while OSCAAR is running. The second is the
integer that encodes the x and y layout of the subplots within the
figure. The third is the index giving the location of the aperture
photometry subplots, which depends on the values of trackPlots and
photPlots.
statusBarFig : figure
A figure object from matplotlib showing the status bar for
completion.
statusBarAx : figure.subplot
A subplot from a matplotlib figure object that represents what
is drawn.
Notes
-----
The list returned by plottingSettings() should be stored in a variable,
and used as an argument in the phot() and trackSmooth() methods.
"""
if trackPlots or photPlots:
plt.ion()
statusBarFig = 0
statusBarAx = 0
if trackPlots and photPlots:
fig = plt.figure(num=None, figsize=(18, 3), facecolor='w', edgecolor='k')
fig.subplots_adjust(wspace=0.5)
subplotsDimensions = 140
photSubplotsOffset = 3
statusSubplotOffset = 6
statusBarAx = None
fig.canvas.set_window_title('oscaar2.0')
elif photPlots and not trackPlots:
fig = plt.figure(num=None, figsize=(5, 5), facecolor='w', edgecolor='k')
fig.subplots_adjust(wspace=0.5)
subplotsDimensions = 110
photSubplotsOffset = 0
statusSubplotOffset = 2
statusBarAx = None
fig.canvas.set_window_title('oscaar2.0')
elif trackPlots and not photPlots:
fig = plt.figure(num=None, figsize=(13.5, 4), facecolor='w', edgecolor='k')
fig.subplots_adjust(wspace=0.5)
subplotsDimensions = 130
photSubplotsOffset = 0
statusSubplotOffset = 5
statusBarAx = None
fig.canvas.set_window_title('oscaar2.0')
elif not trackPlots and not photPlots:
statusBarFig = plt.figure(num=None, figsize=(5, 2), facecolor='w', edgecolor='k')
statusBarFig.canvas.set_window_title('oscaar2.0')
statusBarAx = statusBarFig.add_subplot(111, aspect=10)
statusBarAx.set_title('oscaar2.0 is running...')
statusBarAx.set_xlim([0, 100])
statusBarAx.set_xlabel('Percent Complete (%)')
statusBarAx.get_yaxis().set_ticks([])
subplotsDimensions = 111
photSubplotsOffset = 0
fig = 0
subplotsDimensions = 0
photSubplotsOffset = 0
return [fig, subplotsDimensions, photSubplotsOffset], statusBarFig, statusBarAx
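# Illustrative call sketch (assumed usage, not part of the original module):
# plotSettings, statusBarFig, statusBarAx = plottingSettings(trackPlots=False, photPlots=False)
# plotSettings is then passed on to trackSmooth() and phot() as described in the docstring.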
| mit |
monash-merc/cvl-fabric-launcher | pyinstaller-2.1/setup.py | 6 | 6544 | #! /usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import stat
from setuptools import setup, find_packages
from distutils.command.build_py import build_py
from distutils.command.sdist import sdist
from PyInstaller import get_version
import PyInstaller.utils.git
DESC = ('Converts (packages) Python programs into stand-alone executables, '
'under Windows, Linux, Mac OS X, AIX and Solaris.')
LONG_DESC = """
PyInstaller is a program that converts (packages) Python
programs into stand-alone executables, under Windows, Linux, Mac OS X,
AIX and Solaris. Its main advantages over similar tools are that
PyInstaller works with any version of Python since 2.3, it builds smaller
executables thanks to transparent compression, it is fully multi-platform,
and uses the OS support to load the dynamic libraries, thus ensuring full
compatibility.
The main goal of PyInstaller is to be compatible with 3rd-party packages
out-of-the-box. This means that, with PyInstaller, all the required tricks
to make external packages work are already integrated within PyInstaller
itself so that there is no user intervention required. You'll never be
required to look for tricks in wikis and apply custom modification to your
files or your setup scripts. As an example, libraries like PyQt, Django or
matplotlib are fully supported, without having to handle plugins or
external data files manually.
"""
CLASSIFIERS = """
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Other Audience
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: GNU General Public License v2 (GPLv2)
Classifier: Natural Language :: English
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Operating System :: POSIX :: AIX
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: POSIX :: SunOS/Solaris
Classifier: Programming Language :: C
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 2 :: Only
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: System :: Installation/Setup
Classifier: Topic :: System :: Software Distribution
Classifier: Topic :: Utilities
""".splitlines()
# Make the distribution files always report the git revision used
# when building the distribution packages. This is done by replacing
# PyInstaller/utils/git.py within the dist/build by a fake-module
# which always returns the current git-revision. The original
# source-file is unchanged.
#
# This has to be done in 'build_py' for bdist-commands and in 'sdist'
# for sdist-commands.
def _write_git_version_file(filename):
"""
Fake PyInstaller.utils.git.py to always return the current revision.
"""
git_version = PyInstaller.utils.git.get_repo_revision()
st = os.stat(filename)
# remove the file first for the case it's hard-linked to the
# original file
os.remove(filename)
git_mod = open(filename, 'w')
template = "def get_repo_revision(): return %r"
try:
git_mod.write(template % git_version)
finally:
git_mod.close()
os.chmod(filename, stat.S_IMODE(st.st_mode))
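# After the rewrite, the PyInstaller/utils/git.py shipped inside the built distribution
# consists of a single line of the form (revision string shown is illustrative only):
#     def get_repo_revision(): return '1234abcd'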
class my_build_py(build_py):
def build_module(self, module, module_file, package):
res = build_py.build_module(self, module, module_file, package)
if module == 'git' and package == 'PyInstaller.utils':
filename = self.get_module_outfile(
self.build_lib, package.split('.'), module)
_write_git_version_file(filename)
return res
class my_sdist(sdist):
def make_release_tree(self, base_dir, files):
res = sdist.make_release_tree(self, base_dir, files)
build_py = self.get_finalized_command('build_py')
filename = build_py.get_module_outfile(
base_dir, ['PyInstaller', 'utils'], 'git')
_write_git_version_file(filename)
return res
setup(
install_requires=['distribute'],
name='PyInstaller',
version=get_version(),
description=DESC,
long_description=LONG_DESC,
keywords='packaging, standalone executable, pyinstaller, macholib, freeze, py2exe, py2app, bbfreeze',
author='Giovanni Bajo, Hartmut Goebel, Martin Zibricky',
author_email='[email protected]',
maintainer='Giovanni Bajo, Hartmut Goebel, Martin Zibricky',
maintainer_email='[email protected]',
license=('GPL license with a special exception which allows to use '
'PyInstaller to build and distribute non-free programs '
'(including commercial ones)'),
url='http://www.pyinstaller.org',
download_url='https://sourceforge.net/projects/pyinstaller/files',
classifiers=CLASSIFIERS,
zip_safe=False,
packages=find_packages(),
package_data={
# This includes precompiled bootloaders.
'PyInstaller': ['bootloader/*/*'],
# This file is necessary for rthooks (runtime hooks).
'PyInstaller.loader': ['rthooks.dat'],
},
include_package_data=True,
cmdclass = {
'sdist': my_sdist,
'build_py': my_build_py,
},
entry_points="""
[console_scripts]
pyinstaller=PyInstaller.main:run
pyi-archive_viewer=PyInstaller.cliutils.archive_viewer:run
pyi-bindepend=PyInstaller.cliutils.bindepend:run
pyi-build=PyInstaller.cliutils.build:run
pyi-grab_version=PyInstaller.cliutils.grab_version:run
pyi-make_comserver=PyInstaller.cliutils.make_comserver:run
pyi-makespec=PyInstaller.cliutils.makespec:run
pyi-set_version=PyInstaller.cliutils.set_version:run
"""
)
| gpl-3.0 |
mlperf/training_results_v0.7 | Inspur/benchmarks/dlrm/implementations/implementation_closed/dlrm/utils/metrics.py | 1 | 3623 | """Customized implementation of metrics"""
import numpy as np
import torch
def ref_roc_auc_score(y_true, y_score, exact=True):
"""Compute AUC exactly the same as sklearn
sklearn.metrics.roc_auc_score is a very generalized function that supports all kinds of situations.
See https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_ranking.py. The AUC computation
used by DLRM is a very small subset. This function is the bare minimum code that computes AUC exactly the same way
as sklearn does numerically.
A lot of things are removed:
Anything not required by the binary-class case.
thresholds are not returned since we only need the score.
Args:
y_true (ndarray or list of array):
y_score (ndarray or list of array):
exact (bool): If False, skip some computation used in sklearn. Default True
"""
y_true = np.r_[y_true].flatten()
y_score = np.r_[y_score].flatten()
if y_true.shape != y_score.shape:
raise TypeError(F"Shapre of y_true and y_score must match. Got {y_true.shape} and {y_score.shape}.")
# sklearn label_binarizes y_true, which effectively makes it an integer array
y_true = y_true.astype(np.int)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = np.cumsum(y_true)[threshold_idxs]
fps = 1 + threshold_idxs - tps
if exact:
# Attempt to drop thresholds corresponding to points in between and collinear with other points.
if len(fps) > 2:
optimal_idxs = np.where(np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
# Add an extra threshold position to make sure that the curve starts at (0, 0)
tps = np.r_[0, tps]
fps = np.r_[0, fps]
fpr = fps / fps[-1]
tpr = tps / tps[-1]
direction = 1
if exact:
# I don't understand why it is needed since it is sorted before
if np.any(np.diff(fpr) < 0):
direction = -1
area = direction * np.trapz(tpr, fpr)
return area
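# Illustrative smoke check (added sketch, not part of the original module): a tiny
# hand-computable case in which positives outscore negatives in 3 of the 4 pairs.
def _ref_roc_auc_score_example():
    y_true = [0, 0, 1, 1]
    y_score = [0.1, 0.4, 0.35, 0.8]
    # Expected value is 0.75, matching sklearn.metrics.roc_auc_score on the same input.
    return ref_roc_auc_score(y_true, y_score)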
def roc_auc_score(y_true, y_score):
"""Pytorch implementation almost follows sklearn
Args:
y_true (Tensor):
y_score (Tensor):
"""
device = y_true.device
y_true.squeeze_()
y_score.squeeze_()
if y_true.shape != y_score.shape:
raise TypeError(F"Shapre of y_true and y_score must match. Got {y_true.shape()} and {y_score.shape()}.")
desc_score_indices = torch.argsort(y_score, descending=True)
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
distinct_value_indices = torch.nonzero(y_score[1:] - y_score[:-1], as_tuple=False).squeeze()
threshold_idxs = torch.cat([distinct_value_indices, torch.tensor([y_true.numel() - 1], device=device)])
tps = torch.cumsum(y_true, dim=0)[threshold_idxs]
fps = 1 + threshold_idxs - tps
tps = torch.cat([torch.zeros(1, device=device), tps])
fps = torch.cat([torch.zeros(1, device=device), fps])
fpr = fps / fps[-1]
tpr = tps / tps[-1]
area = torch.trapz(tpr, fpr)
return area
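# Illustrative usage sketch (assumed tensors, not part of the original module); the torch
# version should agree with ref_roc_auc_score on the same data, e.g.
#   y_true = torch.tensor([0., 0., 1., 1.])
#   y_score = torch.tensor([0.1, 0.4, 0.35, 0.8])
#   roc_auc_score(y_true, y_score)  # tensor(0.7500)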
| apache-2.0 |
larsmans/scikit-learn | sklearn/tests/test_common.py | 2 | 16115 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.base import (ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin)
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_parameters_default_constructible,
check_regressors_classifiers_sparse_data,
check_transformer,
check_clustering,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_sparse_data,
check_transformer_pickle,
check_estimators_nan_inf,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_cluster_overwrite_params,
check_sparsify_binary_classifier,
check_sparsify_multiclass_classifier,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_estimators_sparse_data():
# All estimators should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
estimators = all_estimators()
estimators = [(name, Estimator) for name, Estimator in estimators
if issubclass(Estimator, (ClassifierMixin, RegressorMixin))]
for name, Estimator in estimators:
yield check_regressors_classifiers_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_sparse_data, name, Transformer
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
def test_estimators_nan_inf():
# Test that all estimators check their input for NaN's and infs
estimators = all_estimators()
estimators = [(name, E) for name, E in estimators
if (issubclass(E, ClassifierMixin) or
issubclass(E, RegressorMixin) or
issubclass(E, TransformerMixin) or
issubclass(E, ClusterMixin))]
for name, Estimator in estimators:
if name not in CROSS_DECOMPOSITION + ['Imputer']:
yield check_estimators_nan_inf, name, Estimator
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
# test whether any clustering algorithm overwrites its init parameters during fit
yield check_cluster_overwrite_params, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
# RidgeClassifier behaves unexpected
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
def test_estimators_overwrite_params():
# test whether any estimator overwrites its init parameters during fit
for est_type in ["classifier", "regressor", "transformer"]:
estimators = all_estimators(type_filter=est_type)
for name, Estimator in estimators:
if (name not in ['CCA', '_CCA', 'PLSCanonical', 'PLSRegression',
'PLSSVD', 'GaussianProcess']):
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_sparsify_estimators():
# Test if predict with sparsified estimators works.
# Tests regression, binary classification, and multi-class classification.
estimators = all_estimators()
# test regression and binary classification
for name, Estimator in estimators:
try:
Estimator.sparsify
yield check_sparsify_binary_classifier, name, Estimator
except:
pass
# test multiclass classification
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
try:
Classifier.sparsify
yield check_sparsify_multiclass_classifier, name, Classifier
except:
pass
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators with a max_iter attribute
# expose an n_iter attribute of at least 1 after fitting.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif name in CROSS_DECOMPOSITION or (
name in ['LinearSVC', 'LogisticRegression']
):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/datasets/samples_generator.py | 35 | 56035 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
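# Illustrative sketch (added example, not part of the original module): shapes and label
# values produced by a default make_classification call.
def _example_make_classification():
    X, y = make_classification(n_samples=100, random_state=0)
    assert X.shape == (100, 20) and y.shape == (100,)
    assert set(np.unique(y)) == {0, 1}
    return X, y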
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=True,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=True)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
Y = MultiLabelBinarizer().fit([range(n_classes)]).transform(Y)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
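# Illustrative sketch (added example, not part of the original module): with the default
# return_indicator=True, Y is returned as a dense binary indicator matrix.
def _example_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=10, n_features=20, n_classes=5,
                                          random_state=0)
    assert X.shape == (10, 20) and Y.shape == (10, 5)
    return X, Y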
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
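# Illustrative sketch (added example, not part of the original module): with coef=True the
# generating coefficients come back alongside X and y.
def _example_make_regression():
    X, y, w = make_regression(n_samples=50, n_features=5, n_informative=2,
                              coef=True, random_state=0)
    assert X.shape == (50, 5) and y.shape == (50,) and w.shape == (5,)
    return X, y, w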
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
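# Illustrative sketch (added example, not part of the original module): two interleaving
# half circles with a little Gaussian noise, a common toy set for clustering demos.
def _example_make_moons():
    X, y = make_moons(n_samples=100, noise=0.05, random_state=0)
    assert X.shape == (100, 2) and set(np.unique(y)) == {0, 1}
    return X, y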
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
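    Examples
    --------
    A short sketch for illustration (parameter values are arbitrary; only the
    deterministic output shapes are checked):
    >>> from sklearn.datasets.samples_generator import make_friedman2
    >>> X, y = make_friedman2(n_samples=50, random_state=0)
    >>> X.shape
    (50, 4)
    >>> y.shape
    (50,)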
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
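    Examples
    --------
    A short sketch for illustration (parameter values are arbitrary; only the
    deterministic output shapes are checked):
    >>> from sklearn.datasets.samples_generator import make_friedman3
    >>> X, y = make_friedman3(n_samples=50, random_state=0)
    >>> X.shape
    (50, 4)
    >>> y.shape
    (50,)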
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
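    Examples
    --------
    An illustrative sketch (the chosen sizes are arbitrary; only the shape of
    the returned matrix is asserted):
    >>> from sklearn.datasets.samples_generator import make_low_rank_matrix
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> X.shape
    (50, 25)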
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
    X is (n_components, n_samples), and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
    n_samples : int
        Number of samples to generate.
    n_components : int
        Number of components in the dictionary.
    n_features : int
        Number of features of the dataset to generate.
    n_nonzero_coefs : int
        Number of active (non-zero) coefficients in each sample.
    random_state : int or RandomState instance, optional (default=None)
        Seed used by the pseudo random number generator.
    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
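    Examples
    --------
    A minimal sketch for illustration (sizes are arbitrary; the shapes follow
    from the construction Y = DX described above):
    >>> from sklearn.datasets.samples_generator import make_sparse_coded_signal
    >>> Y, D, X = make_sparse_coded_signal(n_samples=15, n_components=30,
    ...                                    n_features=20, n_nonzero_coefs=5,
    ...                                    random_state=0)
    >>> Y.shape, D.shape, X.shape
    ((20, 15), (20, 30), (30, 15))
    >>> int((X[:, 0] != 0).sum())
    5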
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
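    Examples
    --------
    A small sketch for illustration; symmetry holds up to floating-point
    round-off, hence the ``allclose`` check:
    >>> import numpy as np
    >>> from sklearn.datasets.samples_generator import make_spd_matrix
    >>> X = make_spd_matrix(n_dim=3, random_state=0)
    >>> X.shape
    (3, 3)
    >>> bool(np.allclose(X, X.T))
    True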
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.
    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non-zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
    prec : array of shape (dim, dim)
        The generated matrix (a dense ndarray; "sparse" refers to its many
        zero entries).
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
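    Examples
    --------
    A short sketch for illustration (arguments are arbitrary; the output is a
    square matrix of the requested dimension):
    >>> from sklearn.datasets.samples_generator import make_sparse_spd_matrix
    >>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
    >>> prec.shape
    (4, 4)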
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
    n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
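    Examples
    --------
    A minimal sketch for illustration (noise level and sample count are
    arbitrary):
    >>> from sklearn.datasets.samples_generator import make_swiss_roll
    >>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
    >>> X.shape, t.shape
    ((100, 3), (100,))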
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
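    Examples
    --------
    A minimal sketch for illustration (argument values are arbitrary):
    >>> from sklearn.datasets.samples_generator import make_s_curve
    >>> X, t = make_s_curve(n_samples=100, noise=0.05, random_state=0)
    >>> X.shape, t.shape
    ((100, 3), (100,))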
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
        The covariance matrix will be this value times the identity matrix.
        This dataset therefore only produces isotropic (spherical) normal
        distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
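    Examples
    --------
    A short sketch for illustration; with ``n_samples`` divisible by
    ``n_classes`` the classes are exactly balanced:
    >>> import numpy as np
    >>> from sklearn.datasets.samples_generator import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
    ...                                n_classes=3, random_state=0)
    >>> X.shape
    (90, 2)
    >>> np.bincount(y)
    array([30, 30, 30])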
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
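    Examples
    --------
    A minimal sketch for illustration (shape and cluster count are arbitrary):
    >>> from sklearn.datasets.samples_generator import make_biclusters
    >>> X, rows, cols = make_biclusters(shape=(20, 16), n_clusters=4,
    ...                                 random_state=0)
    >>> X.shape
    (20, 16)
    >>> rows.shape, cols.shape
    ((4, 20), (4, 16))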
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
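    Examples
    --------
    A minimal sketch for illustration; with ``(3, 2)`` row/column clusters the
    indicator arrays have ``3 * 2 = 6`` rows each:
    >>> from sklearn.datasets.samples_generator import make_checkerboard
    >>> X, rows, cols = make_checkerboard(shape=(12, 10), n_clusters=(3, 2),
    ...                                   random_state=0)
    >>> X.shape
    (12, 10)
    >>> rows.shape, cols.shape
    ((6, 12), (6, 10))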
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
TAMU-CPT/galaxy-tools | tools/genome_viz/dna_features_viewer/CircularGraphicRecord/ArrowWedge.py | 1 | 3479 | """Implements the missing Matplotlib ArrowWedge patch class.
This is a plain arrow curved alongside a portion of a circle, like you would
expect a circular genetic feature to look.
"""
import numpy as np
import matplotlib.patches as mpatches
class ArrowWedge(mpatches.Wedge):
"""Matplotlib patch shaped as a tick fraction of circle with a pointy end.
This is the patch used by CircularGraphicRecord to draw features.
Parameters
----------
center
Center of the circle around which the arrow-wedge is drawn.
radius
Radius of the circle around which the arrow-wedge is drawn.
theta1
Start angle of the wedge
theta2
End angle of the wedge
width
Width or thickness of the arrow-wedge.
direction
        Determines whether the pointy end points in the direct
        (counterclockwise) sense (+1), in the indirect (clockwise) sense (-1),
        or whether there is no pointy end at all (0).
"""
def __init__(
self, center, radius, theta1, theta2, width, direction=+1, **kwargs
):
self.direction = direction
self.radius = radius
mpatches.Wedge.__init__(
self, center, radius, theta1, theta2, width, **kwargs
)
self._recompute_path()
def _recompute_path(self):
"""Recompute the full path forming the "tick" arrowed wedge
This method overwrites "mpatches.Wedge._recompute_path" in the
super-class.
"""
if self.direction not in [-1, +1]:
return mpatches.Wedge._recompute_path(self)
theta1, theta2 = self.theta1, self.theta2
arrow_angle = min(5, abs(theta2 - theta1) / 2)
normalized_arrow_width = self.width / 2.0 / self.radius
if self.direction == +1:
angle_start_arrow = theta1 + arrow_angle
arc = mpatches.Path.arc(angle_start_arrow, theta2)
outer_arc = arc.vertices[::-1] * (1 + normalized_arrow_width)
inner_arc = arc.vertices * (1 - normalized_arrow_width)
arrow_vertices = [
outer_arc[-1],
np.array(
[np.cos(np.deg2rad(theta1)), np.sin(np.deg2rad(theta1))]
),
inner_arc[0],
]
else:
angle_start_arrow = theta2 - arrow_angle
arc = mpatches.Path.arc(theta1, angle_start_arrow)
outer_arc = (
arc.vertices * (self.radius + self.width / 2.0) / self.radius
)
inner_arc = (
arc.vertices[::-1]
* (self.radius - self.width / 2.0)
/ self.radius
)
arrow_vertices = [
outer_arc[-1],
np.array(
[np.cos(np.deg2rad(theta2)), np.sin(np.deg2rad(theta2))]
),
inner_arc[0],
]
p = np.vstack([outer_arc, arrow_vertices, inner_arc])
path_vertices = np.vstack([p, inner_arc[-1, :], (0, 0)])
path_codes = np.hstack(
[
arc.codes,
4 * [mpatches.Path.LINETO],
arc.codes[1:],
mpatches.Path.LINETO,
mpatches.Path.CLOSEPOLY,
]
)
path_codes[len(arc.codes)] = mpatches.Path.LINETO
# Shift and scale the wedge to the final location.
path_vertices *= self.r
path_vertices += np.asarray(self.center)
self._path = mpatches.Path(path_vertices, path_codes)
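# A minimal usage sketch (added for illustration, not part of the original
# module): it draws a single ArrowWedge on a plain Matplotlib axes. The angles,
# colors, and figure size below are arbitrary assumptions chosen for the demo.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(4, 4), subplot_kw={"aspect": "equal"})
    # A wedge spanning 20..120 degrees on a unit circle, pointing in the
    # direct (counterclockwise) sense.
    wedge = ArrowWedge(
        center=(0, 0),
        radius=1.0,
        theta1=20,
        theta2=120,
        width=0.1,
        direction=+1,
        facecolor="#fdd835",
        edgecolor="black",
    )
    ax.add_patch(wedge)
    ax.set_xlim(-1.3, 1.3)
    ax.set_ylim(-1.3, 1.3)
    plt.show()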
| gpl-3.0 |
securestate/king-phisher | tools/development/cx_freeze.py | 3 | 6043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tools/development/cx_freeze.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import os
import site
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from king_phisher import version
import matplotlib
import pytz
import requests
from mpl_toolkits import basemap
from cx_Freeze import setup, Executable
is_debugging_build = bool(os.environ.get('DEBUG'))
include_dll_path = os.path.join(site.getsitepackages()[1], 'gnome')
# DLLs and DLL dependencies from site-packages\gnome\ last updated for pygi-aio 3.24.1 rev1
missing_dlls = [
    r'lib\enchant\libenchant_aspell.dll',
    r'lib\enchant\libenchant_myspell.dll',
    r'lib\gio\modules\libgiognomeproxy.dll',
    r'lib\gio\modules\libgiolibproxy.dll',
'libaspell-15.dll',
'libatk-1.0-0.dll',
'libcairo-gobject-2.dll',
'libdbus-1-3.dll',
'libdbus-glib-1-2.dll',
'libenchant-1.dll',
'libepoxy-0.dll',
'libffi-6.dll',
'libfontconfig-1.dll',
'libfreetype-6.dll',
'libgcrypt-11.dll',
'libgdk_pixbuf-2.0-0.dll',
'libgdk-3-0.dll',
'libgeoclue-0.dll',
'libgio-2.0-0.dll',
'libgirepository-1.0-1.dll',
'libglib-2.0-0.dll',
'libgmodule-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgssapi-3.dll',
'libgstapp-1.0-0.dll',
'libgstaudio-1.0-0.dll',
'libgstbase-1.0-0.dll',
'libgstfft-1.0-0.dll',
'libgstpbutils-1.0-0.dll',
'libgstreamer-1.0-0.dll',
'libgsttag-1.0-0.dll',
'libgstvideo-1.0-0.dll',
'libgtk-3-0.dll',
'libgtksourceview-3.0-1.dll',
'libharfbuzz-0.dll',
'libharfbuzz-icu-0.dll',
'libicu52.dll',
'libintl-8.dll',
'libjasper-1.dll',
'libjavascriptcoregtk-3.0-0.dll',
'libjpeg-8.dll',
'libopenssl.dll',
'liborc-0.4-0.dll',
'libpango-1.0-0.dll',
'libpangocairo-1.0-0.dll',
'libpangoft2-1.0-0.dll',
'libpangowin32-1.0-0.dll',
'libpng16-16.dll',
'libproxy.dll',
'librsvg-2-2.dll',
'libsecret-1-0.dll',
'libsoup-2.4-1.dll',
'libsqlite3-0.dll',
'libstdc++.dll',
'libtiff-5.dll',
'libwebkitgtk-3.0-0.dll',
'libwebp-5.dll',
'libwinpthread-1.dll',
'libxmlxpat.dll',
'libxslt-1.dll',
'libzzz.dll',
'icudt52l.dat',
]
include_files = []
for dll in missing_dlls:
include_files.append((os.path.join(include_dll_path, dll), dll))
gtk_libs = ['etc', 'lib', 'share']
for lib in gtk_libs:
include_files.append((os.path.join(include_dll_path, lib), lib))
# include all site-packages and eggs for pkg_resources to function correctly
for path in os.listdir(site.getsitepackages()[1]):
if os.path.isdir(os.path.join(site.getsitepackages()[1], path)):
include_files.append((os.path.join(site.getsitepackages()[1], path), path))
include_files.append((matplotlib.get_data_path(), 'mpl-data'))
include_files.append((basemap.basemap_datadir, 'mpl-basemap-data'))
include_files.append(('data/client/king_phisher', 'king_phisher'))
include_files.append(('data/king_phisher', 'king_phisher'))
include_files.append((pytz.__path__[0], 'pytz'))
include_files.append((requests.__path__[0], 'requests'))
include_files.append((collections.__path__[0], 'collections'))
# include the pip executable
executable_path = os.path.dirname(sys.executable)
include_files.append((os.path.join(executable_path, 'Scripts'), 'Scripts'))
include_files.append((os.path.join(executable_path, 'python.exe'), 'python.exe'))
exe_base = 'Win32GUI'
if is_debugging_build:
exe_base = 'Console'
executables = [
Executable(
'KingPhisher',
base=exe_base,
icon='data/client/king_phisher/king-phisher-icon.ico',
shortcutName='King Phisher',
shortcutDir='ProgramMenuFolder'
)
]
build_exe_options = dict(
include_files=include_files,
packages=[
'_geoslib',
'boltons',
'cairo',
'cffi',
'collections',
'cryptography',
'distutils',
'dns',
'email',
'email_validator',
'geoip2',
'geojson',
'gi',
'graphene',
'graphene_sqlalchemy',
'icalendar',
'idna',
'ipaddress',
'jinja2',
'jsonschema',
'king_phisher.client',
'matplotlib',
'mpl_toolkits',
'msgpack',
'numpy',
'paramiko',
'pil',
'pip',
'os',
'six',
'sys',
'pkg_resources',
'pluginbase',
'qrcode',
'reportlab',
'requests',
'smoke_zephyr',
'tzlocal',
'websocket',
'win32api',
'xlsxwriter',
'yaml',
],
excludes=['jinja2.asyncfilters', 'jinja2.asyncsupport'], # not supported with python 3.4
)
version_build = '.'.join(map(str, version.version_info))
setup(
name='KingPhisher',
author='SecureState',
version=version_build,
comments="Version: {}".format(version.distutils_version),
description='King Phisher Client',
options=dict(build_exe=build_exe_options),
executables=executables
)
| bsd-3-clause |
RayMick/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 41 | 35602 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = '\u0627'  # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = ''  # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
kevin-intel/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 71 | 3578 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1]_ and
illustrates the difference in performance between the discrete SAMME [2]_
boosting algorithm and real SAMME.R boosting algorithm. Both algorithms are
evaluated on a binary classification task where the target Y is a non-linear
function of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
ThomasMiconi/nupic.research | htmresearch/frameworks/nlp/classification_metrics.py | 9 | 4754 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
"""
This module contains metrics useful for classification scenarios
"""
def evaluateResults(classifications, references):
"""
Calculate statistics for the predicted classifications against the actual.
@param classifications (tuple) Two lists: (0) predictions and
(1) actual classifications. Items in the predictions list are numpy
arrays of ints or [None], and items in actual classifications list
are numpy arrays of ints.
@param references (list) Classification label strings.
@return (tuple) Returns a 2-item tuple w/ the
accuracy (float) and confusion matrix (numpy array).
"""
accuracy = calculateAccuracy(classifications)
cm = calculateConfusionMatrix(classifications, references)
return (accuracy, cm)
def calculateClassificationResults(classifications):
"""
Calculate the classification accuracy for each category.
@param classifications (list) Two lists: (0) predictions and (1)
actual classifications. Items in the predictions list are lists of
ints or None, and items in actual classifications list are ints.
@return (list) Tuples of class index and accuracy.
"""
if len(classifications[0]) != len(classifications[1]):
raise ValueError("Classification lists must have same length.")
if len(classifications[1]) == 0:
return []
# Get all possible labels
labels = list(set([l for actual in classifications[1] for l in actual]))
labelsToIdx = {l: i for i,l in enumerate(labels)}
correctClassifications = numpy.zeros(len(labels))
totalClassifications = numpy.zeros(len(labels))
for actual, predicted in zip(classifications[1], classifications[0]):
for a in actual:
idx = labelsToIdx[a]
totalClassifications[idx] += 1
if a in predicted:
correctClassifications[idx] += 1
return zip(labels, correctClassifications / totalClassifications)
def calculateAccuracy(classifications):
"""
@param classifications (tuple) First element is list of predicted
labels, second is list of actuals; items are numpy arrays.
@return (float) Correct labels out of total labels,
where a label is correct if it is amongst the actuals.
"""
if len(classifications[0]) != len(classifications[1]):
raise ValueError("Classification lists must have same length.")
if len(classifications[1]) == 0:
return None
accuracy = 0.0
for actual, predicted in zip(classifications[1], classifications[0]):
commonElems = numpy.intersect1d(actual, predicted)
accuracy += len(commonElems)/float(len(actual))
return accuracy/len(classifications[1])
def calculateConfusionMatrix(classifications, references):
"""
  Returns the confusion matrix. Currently this is an empty numpy array; see
  the TODO below for the intended pandas DataFrame implementation.
"""
# TODO: Figure out better way to report multilabel outputs--only handles
# single label now. So for now return empty array.
return numpy.array([])
# if len(classifications[0]) != len(classifications[1]):
# raise ValueError("Classification lists must have same length.")
#
# total = len(references)
# cm = numpy.zeros((total, total+1))
# for actual, predicted in zip(classifications[1], classifications[0]):
# if predicted is not None:
# cm[actual[0]][predicted[0]] += 1
# else:
# # No predicted label, so increment the "(none)" column.
# cm[actual[0]][total] += 1
# cm = numpy.vstack((cm, numpy.sum(cm, axis=0)))
# cm = numpy.hstack((cm, numpy.sum(cm, axis=1).reshape(total+1,1)))
#
# cm = pandas.DataFrame(data=cm,
# columns=references+["(none)"]+["Actual Totals"],
# index=references+["Prediction Totals"])
#
# return cm
| agpl-3.0 |
stylianos-kampakis/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
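    # One possible solution sketch (illustrative only -- the min_df/max_df
    # values and the unigram/bigram grid below are assumptions, not the
    # official solution to this exercise):
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    y_predicted = grid_search.predict(docs_test)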
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
research-team/robot-dream | Nociception/onefibersimulation.py | 1 | 3044 | from neuron import h, gui
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as pyplot
import math
#neuron.load_mechanisms("./mod")
from cfiber import cfiber
def set_recording_vectors(compartment):
''' recording voltage
Parameters
----------
compartment: NEURON section
compartment for recording
Returns
-------
v_vec: h.Vector()
recorded voltage
t_vec: h.Vector()
recorded time
'''
v_vec = h.Vector() # Membrane potential vector at compartment
t_vec = h.Vector() # Time stamp vector
v_vec.record(compartment(0.5)._ref_v)
t_vec.record(h._ref_t)
return v_vec, t_vec
def balance(cell, vinit=-55):
''' voltage balance
Parameters
----------
cell: NEURON cell
cell for balance
vinit: int (mV)
initialized voltage
'''
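    # For each section, choose leak conductances so that the net Na and K
    # currents are zero at the resting potential vinit:
    #   g_leak = -I_total / (vinit - E_rev)
    # If that conductance would be negative, the residual current is instead
    # cancelled with an extra pump current.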
for sec in cell.all:
if ((-(sec.ina_nattxs + sec.ina_navv1p8 + sec.ina_Nav1_3 + sec.ina_nakpump) / (vinit - sec.ena)) < 0):
sec.pumpina_extrapump = -(sec.ina_nattxs + sec.ina_navv1p8 + sec.ina_Nav1_3 + sec.ina_nakpump)
else:
sec.gnaleak_leak = -(sec.ina_nattxs + sec.ina_navv1p8 + sec.ina_Nav1_3 + sec.ina_nakpump) / (vinit - sec.ena)
if ((-(sec.ik_kdr + sec.ik_nakpump + sec.ik_kap + sec.ik_kad) / (vinit - sec.ek)) < 0):
sec.pumpik_extrapump = -(sec.ik_kdr + sec.ik_nakpump + sec.ik_kap + sec.ik_kad)
else:
sec.gkleak_leak = -(sec.ik_kdr + sec.ik_nakpump + sec.ik_kap + sec.ik_kad) / (vinit - sec.ek)
def simulate(cell, tstop=500, vinit=-55):
''' simulation control
Parameters
----------
cell: NEURON cell
cell for simulation
tstop: int (ms)
simulation time
vinit: int (mV)
initialized voltage
'''
h.finitialize(vinit)
balance(cell)
if h.cvode.active():
        h.cvode.re_init()  # re-initialize the variable-step integrator after finitialize
else:
h.fcurrent()
h.frecord_init()
h.tstop = tstop
h.v_init = vinit
h.run()
# running_ = 1
# dt = 40
# dl = 1000
# h.stdinit()
# for n in range(5):
# cell.x_application = cell.x_application + dl
# cell.distance()
# for item in cell.diffs:
# item.tx1 = h.t + 1
# item.initial = item.atp
# item.c0cleft = item.c0cleft
# item.h = cell.distances.get(cell.diffusions.get(item))
# h.continuerun(h.t+dt)
def show_output(v_vec, t_vec):
''' show graphs
Parameters
----------
v_vec: h.Vector()
recorded voltage
t_vec: h.Vector()
recorded time
'''
dend_plot = pyplot.plot(t_vec, v_vec)
pyplot.xlabel('time (ms)')
pyplot.ylabel('mV')
if __name__ == '__main__':
numofmodel = 8
cell = cfiber(250, 1, 0, 15000, True, numofmodel)
for sec in h.allsec():
h.psection(sec=sec) #show parameters of each section
branch_vec, t_vec = set_recording_vectors(cell.branch)
print(cell.numofmodel)
simulate(cell)
show_output(branch_vec, t_vec)
    pyplot.show()
| mit |
yonglehou/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
joyeshmishra/spark-tk | python/sparktk/frame/frame.py | 3 | 21047 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.rdd import RDD
from pyspark.sql import DataFrame
from sparktk.frame.pyframe import PythonFrame
from sparktk.frame.schema import schema_to_python, schema_to_scala, schema_is_coercible
from sparktk import dtypes
import logging
logger = logging.getLogger('sparktk')
from sparktk.propobj import PropertiesObject
from sparktk import TkContext
# import constructors for the API's sake (not actually dependencies of the Frame class)
from sparktk.frame.constructors.create import create
from sparktk.frame.constructors.import_csv import import_csv
from sparktk.frame.constructors.import_csv_raw import import_csv_raw
from sparktk.frame.constructors.import_hbase import import_hbase
from sparktk.frame.constructors.import_hive import import_hive
from sparktk.frame.constructors.import_jdbc import import_jdbc
from sparktk.frame.constructors.import_json import import_json
from sparktk.frame.constructors.import_pandas import import_pandas
from sparktk.frame.constructors.import_xml import import_xml
__all__ = ["create",
"Frame",
"import_csv",
"import_csv_raw",
"import_hbase",
"import_hive",
"import_jdbc",
"import_json",
"import_pandas",
"import_xml",
"load"]
class Frame(object):
def __init__(self, tc, source, schema=None, validate_schema=False):
"""(Private constructor -- use tc.frame.create or other methods available from the TkContext)"""
self._tc = tc
if self._is_scala_frame(source):
self._frame = source
elif self._is_scala_rdd(source):
scala_schema = schema_to_scala(tc.sc, schema)
self._frame = self._create_scala_frame(tc.sc, source, scala_schema)
elif self._is_scala_dataframe(source):
self._frame = self._create_scala_frame_from_scala_dataframe(tc.sc, source)
elif isinstance(source, DataFrame):
self._frame = self._create_scala_frame_from_scala_dataframe(tc.sc, source._jdf)
elif isinstance(source, PythonFrame):
self._frame = source
else:
if not isinstance(source, RDD):
if not isinstance(source, list) or (len(source) > 0 and any(not isinstance(row, (list, tuple)) for row in source)):
raise TypeError("Invalid data source. The data parameter must be a 2-dimensional list (list of row data) or an RDD.")
inferred_schema = False
if isinstance(schema, list):
if all(isinstance(item, basestring) for item in schema):
# check if schema is just a list of column names (versus string and data type tuples)
schema = self._infer_schema(source, schema)
inferred_schema = True
elif not all(isinstance(item, tuple) and
len(item) == 2 and
isinstance(item[0], basestring) for item in schema):
raise TypeError("Invalid schema. Expected a list of tuples (str, type) with the column name and data type, but received type %s." % type(schema))
# check for duplicate column names
column_names = [col[0] for col in schema]
duplicate_column_names = set([col for col in column_names if column_names.count(col) > 1])
if len(duplicate_column_names) > 0:
raise ValueError("Invalid schema, column names cannot be duplicated: %s" % ", ".join(duplicate_column_names))
elif schema is None:
schema = self._infer_schema(source)
inferred_schema = True
else:
# Schema is not a list or None
raise TypeError("Invalid schema type: %s. Expected a list of tuples (str, type) with the column name and data type." % type(schema))
for item in schema:
if not self._is_supported_datatype(item[1]):
if inferred_schema:
raise TypeError("The %s data type was found when inferring the schema, and it is not a "
"supported data type. Instead, specify a schema that uses a supported data "
"type, and enable validate_schema so that the data is converted to the proper "
"data type.\n\nInferred schema: %s\n\nSupported data types: %s" %
(str(item[1]), str(schema), dtypes.dtypes))
else:
raise TypeError("Invalid schema. %s is not a supported data type.\n\nSupported data types: %s" %
(str(item[1]), dtypes.dtypes))
source = tc.sc.parallelize(source)
if schema and validate_schema:
# Validate schema by going through the data and checking the data type and attempting to parse it
validate_schema_result = self.validate_pyrdd_schema(source, schema)
source = validate_schema_result.validated_rdd
logger.debug("%s values were unable to be parsed to the schema's data type." % validate_schema_result.bad_value_count)
# If schema contains matrix datatype, then apply type_coercer to convert list[list] to numpy ndarray
map_source = schema_is_coercible(source, list(schema))
self._frame = PythonFrame(map_source, schema)
def _merge_types(self, type_list_a, type_list_b):
"""
Merges two lists of data types
:param type_list_a: First list of data types to merge
:param type_list_b: Second list of data types to merge
:return: List of merged data types
"""
if not isinstance(type_list_a, list) or not isinstance(type_list_b, list):
raise TypeError("Unable to generate schema, because schema is not a list.")
if len(type_list_a) != len(type_list_b):
raise ValueError("Length of each row must be the same (found rows with lengths: %s and %s)." % (len(type_list_a), len(type_list_b)))
return [dtypes._DataTypes.merge_types(type_list_a[i], type_list_b[i]) for i in xrange(0, len(type_list_a))]
def _infer_types_for_row(self, row):
"""
Returns a list of data types for the data in the specified row
:param row: List or Row of data
:return: List of data types
"""
inferred_types = []
for item in row:
if item is None:
inferred_types.append(int)
elif not isinstance(item, list):
inferred_types.append(type(item))
else:
inferred_types.append(dtypes.vector((len(item))))
return inferred_types
def _infer_schema(self, data, column_names=[], sample_size=100):
"""
Infers the schema based on the data in the RDD.
:param data: Data used to infer schema
:param column_names: Optional column names to use in the schema. If no column names are provided, columns
are given numbered names. If there are more columns in the RDD than there are in the
column_names list, remaining columns will be numbered.
:param sample_size: Number of rows to check when inferring the schema. Defaults to 100.
:return: Schema
"""
inferred_schema = []
if isinstance(data, list):
if len(data) > 0:
# get the schema for the first row
data_types = self._infer_types_for_row(data[0])
sample_size = min(sample_size, len(data))
                for i in xrange(1, sample_size):
data_types = self._merge_types(data_types, self._infer_types_for_row(data[i]))
for i, data_type in enumerate(data_types):
column_name = "C%s" % i
if len(column_names) > i:
column_name = column_names[i]
inferred_schema.append((column_name, data_type))
else:
raise TypeError("Unable to infer schema, because the data provided is not a list.")
return inferred_schema
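    # Illustrative example (hypothetical data): for rows [[1, 'a'], [2, 'b']] and
    # column_names=['num'], the inferred schema would be [('num', int), ('C1', str)];
    # columns without a supplied name fall back to numbered names such as 'C1'.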
def _is_supported_datatype(self, data_type):
"""
Returns True if the specified data_type is supported.
"""
supported_primitives = [int, float, long, str, unicode]
if data_type in supported_primitives:
return True
elif data_type is dtypes.datetime:
return True
elif type(data_type) is dtypes.vector:
return True
elif data_type is dtypes.matrix:
return True
else:
return False
def validate_pyrdd_schema(self, pyrdd, schema):
if isinstance(pyrdd, RDD):
schema_length = len(schema)
num_bad_values = self._tc.sc.accumulator(0)
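            # Spark accumulator that counts, across all partitions, the values
            # that could not be cast to the data type declared in the schema.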
def validate_schema(row, accumulator):
data = []
if len(row) != schema_length:
raise ValueError("Length of the row (%s) does not match the schema length (%s)." % (len(row), len(schema)))
for index, column in enumerate(schema):
data_type = column[1]
try:
if row[index] is not None:
data.append(dtypes.dtypes.cast(row[index], data_type))
except:
data.append(None)
accumulator += 1
return data
validated_rdd = pyrdd.map(lambda row: validate_schema(row, num_bad_values))
# Force rdd to load, so that we can get a bad value count
validated_rdd.count()
return SchemaValidationReturn(validated_rdd, num_bad_values.value)
else:
raise TypeError("Unable to validate schema, because the pyrdd provided is not an RDD.")
@staticmethod
def _create_scala_frame(sc, scala_rdd, scala_schema):
"""call constructor in JVM"""
return sc._jvm.org.trustedanalytics.sparktk.frame.Frame(scala_rdd, scala_schema, False)
@staticmethod
def _create_scala_frame_from_scala_dataframe(sc, scala_dataframe):
"""call constructor in JVM"""
return sc._jvm.org.trustedanalytics.sparktk.frame.Frame(scala_dataframe)
@staticmethod
def _from_scala(tc, scala_frame):
"""creates a python Frame for the given scala Frame"""
return Frame(tc, scala_frame)
def _frame_to_scala(self, python_frame):
"""converts a PythonFrame to a Scala Frame"""
scala_schema = schema_to_scala(self._tc.sc, python_frame.schema)
scala_rdd = self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.rdd.PythonJavaRdd.pythonToScala(python_frame.rdd._jrdd, scala_schema)
return self._create_scala_frame(self._tc.sc, scala_rdd, scala_schema)
def _is_scala_frame(self, item):
return self._tc._jutils.is_jvm_instance_of(item, self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.Frame)
def _is_scala_rdd(self, item):
return self._tc._jutils.is_jvm_instance_of(item, self._tc.sc._jvm.org.apache.spark.rdd.RDD)
def _is_scala_dataframe(self, item):
return self._tc._jutils.is_jvm_instance_of(item, self._tc.sc._jvm.org.apache.spark.sql.DataFrame)
def _is_python_rdd(self, item):
return isinstance(item, RDD)
@property
def _is_scala(self):
"""answers whether the current frame is backed by a Scala Frame"""
answer = self._is_scala_frame(self._frame)
logger.info("frame._is_scala reference: %s" % answer)
return answer
@property
def _is_python(self):
"""answers whether the current frame is backed by a _PythonFrame"""
answer = not self._is_scala_frame(self._frame)
logger.info("frame._is_python reference: %s" % answer)
return answer
@property
def _scala(self):
"""gets frame backend as Scala Frame, causes conversion if it is current not"""
if self._is_python:
logger.info("frame._scala reference: converting frame backend from Python to Scala")
            # If the schema contains a matrix datatype, apply type_coercer_pymlib to convert
            # each ndarray to a pymlib DenseMatrix so that it can be serialized on the Java side
self._frame.rdd = schema_is_coercible(self._frame.rdd, list(self._frame.schema), True)
            # convert PythonFrame to a Scala Frame
scala_schema = schema_to_scala(self._tc.sc, self._frame.schema)
scala_rdd = self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.rdd.PythonJavaRdd.pythonToScala(self._frame.rdd._jrdd, scala_schema)
self._frame = self._create_scala_frame(self._tc.sc, scala_rdd, scala_schema)
else:
logger.info("frame._scala reference: frame already has a scala backend")
return self._frame
@property
def _python(self):
"""gets frame backend as _PythonFrame, causes conversion if it is current not"""
if self._is_scala:
logger.info("frame._python reference: converting frame backend from Scala to Python")
            # convert Scala Frame to a PythonFrame
scala_schema = self._frame.schema()
java_rdd = self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.rdd.PythonJavaRdd.scalaToPython(self._frame.rdd())
python_schema = schema_to_python(self._tc.sc, scala_schema)
python_rdd = RDD(java_rdd, self._tc.sc)
# If schema contains matrix datatype, then apply type_coercer to convert list[list] to numpy ndarray
map_python_rdd = schema_is_coercible(python_rdd, list(python_schema))
self._frame = PythonFrame(map_python_rdd, python_schema)
else:
logger.info("frame._python reference: frame already has a python backend")
return self._frame
##########################################################################
# API
##########################################################################
@property
def rdd(self):
"""pyspark RDD (causes conversion if currently backed by a Scala RDD)"""
return self._python.rdd
@property
def dataframe(self):
"""pyspark DataFrame (causes conversion through Scala)"""
return DataFrame(self._scala.dataframe(), self._tc.sql_context)
@property
def schema(self):
if self._is_scala:
return schema_to_python(self._tc.sc, self._frame.schema()) # need ()'s on schema because it's a def in scala
return self._frame.schema
@property
def column_names(self):
"""
Column identifications in the current frame.
:return: list of names of all the frame's columns
Returns the names of the columns of the current frame.
Examples
--------
<skip>
>>> frame.column_names
[u'name', u'age', u'tenure', u'phone']
</skip>
"""
return [name for name, data_type in self.schema]
# Frame Operations
from sparktk.frame.ops.add_columns import add_columns
from sparktk.frame.ops.append import append
from sparktk.frame.ops.assign_sample import assign_sample
from sparktk.frame.ops.bin_column import bin_column
from sparktk.frame.ops.binary_classification_metrics import binary_classification_metrics
from sparktk.frame.ops.box_cox import box_cox
from sparktk.frame.ops.categorical_summary import categorical_summary
from sparktk.frame.ops.collect import collect
from sparktk.frame.ops.column_median import column_median
from sparktk.frame.ops.column_mode import column_mode
from sparktk.frame.ops.column_summary_statistics import column_summary_statistics
from sparktk.frame.ops.copy import copy
from sparktk.frame.ops.correlation import correlation
from sparktk.frame.ops.correlation_matrix import correlation_matrix
from sparktk.frame.ops.count import count
from sparktk.frame.ops.covariance import covariance
from sparktk.frame.ops.covariance_matrix import covariance_matrix
from sparktk.frame.ops.cumulative_percent import cumulative_percent
from sparktk.frame.ops.cumulative_sum import cumulative_sum
from sparktk.frame.ops.dot_product import dot_product
from sparktk.frame.ops.drop_columns import drop_columns
from sparktk.frame.ops.drop_duplicates import drop_duplicates
from sparktk.frame.ops.drop_rows import drop_rows
from sparktk.frame.ops.ecdf import ecdf
from sparktk.frame.ops.entropy import entropy
from sparktk.frame.ops.export_to_csv import export_to_csv
from sparktk.frame.ops.export_to_jdbc import export_to_jdbc
from sparktk.frame.ops.export_to_json import export_to_json
from sparktk.frame.ops.export_to_hbase import export_to_hbase
from sparktk.frame.ops.export_to_hive import export_to_hive
from sparktk.frame.ops.filter import filter
from sparktk.frame.ops.flatten_columns import flatten_columns
from sparktk.frame.ops.group_by import group_by
from sparktk.frame.ops.histogram import histogram
from sparktk.frame.ops.inspect import inspect
from sparktk.frame.ops.join_inner import join_inner
from sparktk.frame.ops.join_left import join_left
from sparktk.frame.ops.join_right import join_right
from sparktk.frame.ops.join_outer import join_outer
from sparktk.frame.ops.map_columns import map_columns
from sparktk.frame.ops.matrix_covariance_matrix import matrix_covariance_matrix
from sparktk.frame.ops.matrix_pca import matrix_pca
from sparktk.frame.ops.matrix_svd import matrix_svd
from sparktk.frame.ops.multiclass_classification_metrics import multiclass_classification_metrics
from sparktk.frame.ops.power_iteration_clustering import power_iteration_clustering
from sparktk.frame.ops.quantile_bin_column import quantile_bin_column
from sparktk.frame.ops.quantiles import quantiles
from sparktk.frame.ops.rename_columns import rename_columns
from sparktk.frame.ops.reverse_box_cox import reverse_box_cox
from sparktk.frame.ops.save import save
from sparktk.frame.ops.sort import sort
from sparktk.frame.ops.sortedk import sorted_k
from sparktk.frame.ops.take import take
from sparktk.frame.ops.tally import tally
from sparktk.frame.ops.tally_percent import tally_percent
from sparktk.frame.ops.timeseries_augmented_dickey_fuller_test import timeseries_augmented_dickey_fuller_test
from sparktk.frame.ops.timeseries_breusch_godfrey_test import timeseries_breusch_godfrey_test
from sparktk.frame.ops.timeseries_breusch_pagan_test import timeseries_breusch_pagan_test
from sparktk.frame.ops.timeseries_durbin_watson_test import timeseries_durbin_watson_test
from sparktk.frame.ops.timeseries_from_observations import timeseries_from_observations
from sparktk.frame.ops.timeseries_slice import timeseries_slice
from sparktk.frame.ops.to_pandas import to_pandas
from sparktk.frame.ops.topk import top_k
from sparktk.frame.ops.unflatten_columns import unflatten_columns
def load(path, tc=TkContext.implicit):
"""load Frame from given path"""
TkContext.validate(tc)
return tc.load(path, Frame)
class SchemaValidationReturn(PropertiesObject):
"""
Return value from schema validation that includes the rdd of validated values and the number of bad values
that were found.
"""
def __init__(self, validated_rdd, bad_value_count):
self._validated_rdd = validated_rdd
self._bad_value_count = bad_value_count
@property
def validated_rdd(self):
"""
RDD of values that have been casted to the data type specified by the frame's schema.
"""
return self._validated_rdd
@property
def bad_value_count(self):
"""
Number of values that were unable to be parsed to the data type specified by the schema.
"""
return self._bad_value_count
| apache-2.0 |
jhanley634/testing-tools | problem/pop_map/demographic/num_reps.py | 1 | 2285 | #! /usr/bin/env python
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import pandas as pd
# https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/IG0UN2
# https://dataverse.harvard.edu/api/access/datafile/3814252?format=original&gbrecs=true
def _get_num_districts(in_file='/tmp/1976-2018-house2.csv'):
df = pd.read_csv(in_file, encoding='latin-1')
df = df[(df.year == 2018) & (df.district >= 0) & ~df.writein]
df = df[['state_po', 'district', 'candidate', 'party', 'candidatevotes']]
df = df.rename(columns={'state_po': 'state'})
df = df[['state', 'district']]
df = df.drop_duplicates(['state', 'district'])
return df.append([dict(state='DC', district=42)])
def _get_num_electors():
df = _get_num_districts().groupby('state').agg('count')
df = df.rename(columns={'district': 'electors'})
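    # each state's electors = number of House districts + 2 senators; the DC
    # placeholder row added above ends up with 1 + 2 = 3 electors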
df.electors += 2
return df
def find_ties(target=269):
xs = sorted(_get_num_electors().electors, reverse=True)
assert 51 == len(xs)
assert 2 * target == sum(xs)
# greedy
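    # Greedy partition heuristic: walk the states from largest to smallest
    # elector count, always adding the next state to whichever pile is
    # currently smaller, and report whenever the two piles momentarily tie.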
s1 = s2 = 0
for x in xs:
if s1 <= s2:
s1 += x
else:
s2 += x
if s1 == s2:
print(s1, s2, x)
if __name__ == '__main__':
find_ties()
| mit |
cowlicks/dask | dask/dataframe/tests/test_dataframe.py | 1 | 69392 | import sys
from operator import getitem
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
from dask.async import get_sync
from dask import delayed
from dask.utils import raises, ignoring, put_lines
import dask.dataframe as dd
from dask.dataframe.core import (repartition_divisions, _loc, aca, _concat,
_Frame, Scalar)
from dask.dataframe.utils import eq, make_meta
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
full = d.compute()
def test_Dataframe():
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='a')
assert eq(d['a'] + 1, expected)
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
assert eq(d[d['b'] > 2], full[full['b'] > 2])
assert eq(d[['a', 'b']], full[['a', 'b']])
assert eq(d.a, full.a)
assert d.b.mean().compute() == full.b.mean()
assert np.allclose(d.b.var().compute(), full.b.var())
assert np.allclose(d.b.std().compute(), full.b.std())
assert d.index._name == d.index._name # this is deterministic
assert repr(d)
def test_head_tail():
assert eq(d.head(2), full.head(2))
assert eq(d.head(3), full.head(3))
assert eq(d.head(2), dsk[('x', 0)].head(2))
assert eq(d['a'].head(2), full['a'].head(2))
assert eq(d['a'].head(3), full['a'].head(3))
assert eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
assert (sorted(d.head(2, compute=False).dask) ==
sorted(d.head(2, compute=False).dask))
assert (sorted(d.head(2, compute=False).dask) !=
sorted(d.head(3, compute=False).dask))
assert eq(d.tail(2), full.tail(2))
assert eq(d.tail(3), full.tail(3))
assert eq(d.tail(2), dsk[('x', 2)].tail(2))
assert eq(d['a'].tail(2), full['a'].tail(2))
assert eq(d['a'].tail(3), full['a'].tail(3))
assert eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
assert (sorted(d.tail(2, compute=False).dask) ==
sorted(d.tail(2, compute=False).dask))
assert (sorted(d.tail(2, compute=False).dask) !=
sorted(d.tail(3, compute=False).dask))
def test_head_npartitions():
assert eq(d.head(5, npartitions=2), full.head(5))
assert eq(d.head(5, npartitions=2, compute=False), full.head(5))
assert eq(d.head(5, npartitions=-1), full.head(5))
assert eq(d.head(7, npartitions=-1), full.head(7))
assert eq(d.head(2, npartitions=-1), full.head(2))
with pytest.raises(ValueError):
d.head(2, npartitions=5)
@pytest.mark.skipif(sys.version_info[:2] == (3,3),
reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_head_npartitions_warn():
with pytest.warns(None):
d.head(100)
with pytest.warns(None):
d.head(7)
with pytest.warns(None):
d.head(7, npartitions=2)
def test_index_head():
assert eq(d.index.head(2), full.index[:2])
assert eq(d.index.head(3), full.index[:3])
def test_Series():
assert isinstance(d.a, dd.Series)
assert isinstance(d.a + 1, dd.Series)
assert eq((d + 1), full + 1)
assert repr(d.a).startswith('dd.Series')
def test_repr():
df = pd.DataFrame({'x': list(range(100))})
ddf = dd.from_pandas(df, 3)
for x in [ddf, ddf.index, ddf.x]:
assert type(x).__name__ in repr(x)
assert x._name[:5] in repr(x)
assert str(x.npartitions) in repr(x)
assert len(repr(x)) < 80
def test_Index():
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D', periods=10))]:
ddf = dd.from_pandas(case, 3)
assert eq(ddf.index, case.index)
assert repr(ddf.index).startswith('dd.Index')
assert raises(AttributeError, lambda: ddf.index.index)
def test_Scalar():
val = np.int64(1)
s = Scalar({('a', 0): val}, 'a', 'i8')
assert hasattr(s, 'dtype')
assert 'dtype' in dir(s)
assert eq(s, val)
assert repr(s) == "dd.Scalar<a, dtype=int64>"
val = pd.Timestamp('2001-01-01')
s = Scalar({('a', 0): val}, 'a', val)
assert not hasattr(s, 'dtype')
assert 'dtype' not in dir(s)
assert eq(s, val)
assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_attributes():
assert 'a' in dir(d)
assert 'foo' not in dir(d)
pytest.raises(AttributeError, lambda: d.foo)
df = dd.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}), npartitions=2)
assert 'a b c' not in dir(df)
df = dd.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}), npartitions=2)
assert 'a' in dir(df)
assert 5 not in dir(df)
df = dd.from_pandas(tm.makeTimeDataFrame(), npartitions=3)
pytest.raises(AttributeError, lambda: df.foo)
def test_column_names():
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
tm.assert_index_equal(d[['b', 'a']].columns, pd.Index(['b', 'a']))
assert d['a'].name == 'a'
assert (d['a'] + 1).name == 'a'
assert (d['a'] + d['b']).name is None
def test_index_names():
assert d.index.name is None
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = dd.from_pandas(df, 3)
assert ddf.index.name == 'x'
assert ddf.index.compute().name == 'x'
def test_set_index():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index('b', npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == 'b'
assert eq(d2, full.set_index('b'))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == 'b'
assert eq(d3, full.set_index(full.b))
d4 = d.set_index('b')
assert d4.index.name == 'b'
assert eq(d4, full.set_index('b'))
def test_set_index_interpolate():
df = pd.DataFrame({'x': [1, 1, 1, 3, 3], 'y': [1., 1, 1, 1, 2]})
d = dd.from_pandas(df, 2)
d1 = d.set_index('x', npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 3])
d2 = d.set_index('y', npartitions=3)
assert d2.divisions[0] == 1.
assert 1. < d2.divisions[1] < d2.divisions[2] < 2.
assert d2.divisions[3] == 2.
def test_set_index_interpolate_int():
L = sorted(list(range(0, 200, 10))*2)
df = pd.DataFrame({'x': 2*L})
d = dd.from_pandas(df, 2)
d1 = d.set_index('x', npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range('20130101', periods=3))
s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
df = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index('notz', npartitions=2)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[2] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index('tz', npartitions=2)
s2 = pd.DatetimeIndex(s_aware.values, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[2] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame({'A': list('ABAABBABAA'),
'B': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'C': [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
ddf = dd.from_pandas(pdf, 3)
assert eq(ddf.set_index('A', drop=drop),
pdf.set_index('A', drop=drop))
assert eq(ddf.set_index('B', drop=drop),
pdf.set_index('B', drop=drop))
assert eq(ddf.set_index('C', drop=drop),
pdf.set_index('C', drop=drop))
assert eq(ddf.set_index(ddf.A, drop=drop),
pdf.set_index(pdf.A, drop=drop))
assert eq(ddf.set_index(ddf.B, drop=drop),
pdf.set_index(pdf.B, drop=drop))
assert eq(ddf.set_index(ddf.C, drop=drop),
pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame({0: list('ABAABBABAA'),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
ddf = dd.from_pandas(pdf, 3)
assert eq(ddf.set_index(0, drop=drop),
pdf.set_index(0, drop=drop))
assert eq(ddf.set_index(2, drop=drop),
pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with tm.assertRaisesRegexp(NotImplementedError, msg):
ddf.set_index(['a', 'b'])
def test_rename_columns():
# GH 819
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
ddf.columns = ['x', 'y']
df.columns = ['x', 'y']
tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
assert eq(ddf, df)
msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with tm.assertRaisesRegexp(ValueError, msg):
ddf.columns = [1, 2, 3, 4]
def test_rename_series():
# GH 819
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = dd.from_pandas(s, 2)
s.name = 'renamed'
ds.name = 'renamed'
assert s.name == 'renamed'
assert eq(ds, s)
def test_describe():
# prepare test case which approx quantiles will be the same as actuals
s = pd.Series(list(range(20)) * 4)
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})
ds = dd.from_pandas(s, 4)
ddf = dd.from_pandas(df, 4)
assert eq(s.describe(), ds.describe())
assert eq(df.describe(), ddf.describe())
# remove string columns
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
'c': list('abcd') * 20})
ddf = dd.from_pandas(df, 4)
assert eq(df.describe(), ddf.describe())
def test_cumulative():
pdf = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(pdf, 5)
assert eq(ddf.cumsum(), pdf.cumsum())
assert eq(ddf.cumprod(), pdf.cumprod())
assert eq(ddf.cummin(), pdf.cummin())
assert eq(ddf.cummax(), pdf.cummax())
assert eq(ddf.cumsum(axis=1), pdf.cumsum(axis=1))
assert eq(ddf.cumprod(axis=1), pdf.cumprod(axis=1))
assert eq(ddf.cummin(axis=1), pdf.cummin(axis=1))
assert eq(ddf.cummax(axis=1), pdf.cummax(axis=1))
assert eq(ddf.a.cumsum(), pdf.a.cumsum())
assert eq(ddf.a.cumprod(), pdf.a.cumprod())
assert eq(ddf.a.cummin(), pdf.a.cummin())
assert eq(ddf.a.cummax(), pdf.a.cummax())
def test_dropna():
df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
'y': [1, 2, np.nan, 4, np.nan, np.nan],
'z': [1, 2, 3, 4, np.nan, np.nan]},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.from_pandas(df, 3)
assert eq(ddf.x.dropna(), df.x.dropna())
assert eq(ddf.y.dropna(), df.y.dropna())
assert eq(ddf.z.dropna(), df.z.dropna())
assert eq(ddf.dropna(), df.dropna())
assert eq(ddf.dropna(how='all'), df.dropna(how='all'))
assert eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
assert eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
assert eq(ddf.dropna(subset=['y', 'z'], how='all'),
df.dropna(subset=['y', 'z'], how='all'))
def test_where_mask():
pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
# different index
pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf4 = dd.from_pandas(pdf4, 2)
# different columns
pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3,
'd': [False] * 9,
'e': [True] * 9},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf6 = dd.from_pandas(pdf6, 2)
cases = [(ddf1, ddf2, pdf1, pdf2),
(ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
(ddf1, ddf4, pdf3, pdf4),
(ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
pdf3, pdf4),
(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
# use pd.DataFrame as cond
(ddf1, pdf2, pdf1, pdf2),
(ddf1, pdf4, pdf3, pdf4),
(ddf5, pdf6, pdf5, pdf6)]
for ddf, ddcond, pdf, pdcond in cases:
assert isinstance(ddf, dd.DataFrame)
assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
assert isinstance(pdf, pd.DataFrame)
assert isinstance(pdcond, pd.DataFrame)
assert eq(ddf.where(ddcond), pdf.where(pdcond))
assert eq(ddf.mask(ddcond), pdf.mask(pdcond))
assert eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
assert eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
# ToDo: Should work on pandas 0.17
# https://github.com/pydata/pandas/pull/10283
# assert eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
# assert eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
assert eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
assert eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
assert eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
assert eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
assert eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b),
full.a + full.b)
assert eq(dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1),
full.a + full.b + 1)
def test_map_partitions():
assert eq(d.map_partitions(lambda df: df, meta=d), full)
assert eq(d.map_partitions(lambda df: df), full)
result = d.map_partitions(lambda df: df.sum(axis=1))
assert eq(result, full.sum(axis=1))
assert eq(d.map_partitions(lambda df: 1), pd.Series([1, 1, 1]),
check_divisions=False)
x = Scalar({('x', 0): 1}, 'x', int)
result = dd.map_partitions(lambda x: 2, x)
assert result.dtype in (np.int32, np.int64) and result.compute() == 2
result = dd.map_partitions(lambda x: 4.0, x)
assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_names():
func = lambda x: x
assert sorted(dd.map_partitions(func, d, meta=d).dask) == \
sorted(dd.map_partitions(func, d, meta=d).dask)
assert sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) == \
sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask)
func = lambda x, y: x
assert sorted(dd.map_partitions(func, d, d, meta=d).dask) == \
sorted(dd.map_partitions(func, d, d, meta=d).dask)
def test_map_partitions_column_info():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = dd.map_partitions(lambda x: x, a, meta=a)
tm.assert_index_equal(b.columns, a.columns)
assert eq(df, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert eq(df.x, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert eq(df.x, b)
b = dd.map_partitions(lambda df: df.x + df.y, a)
assert isinstance(b, dd.Series)
assert b.dtype == 'i8'
b = dd.map_partitions(lambda df: df.x + 1, a, meta=('x', 'i8'))
assert isinstance(b, dd.Series)
assert b.name == 'x'
assert b.dtype == 'i8'
def test_map_partitions_method_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = a.map_partitions(lambda x: x)
assert isinstance(b, dd.DataFrame)
tm.assert_index_equal(b.columns, a.columns)
b = a.map_partitions(lambda df: df.x + 1)
assert isinstance(b, dd.Series)
assert b.dtype == 'i8'
b = a.map_partitions(lambda df: df.x + 1, meta=('x', 'i8'))
assert isinstance(b, dd.Series)
assert b.name == 'x'
assert b.dtype == 'i8'
def test_map_partitions_keeps_kwargs_in_dict():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
def f(s, x=1):
return s + x
b = a.x.map_partitions(f, x=5)
assert "'x': 5" in str(b.dask)
assert eq(df.x + 5, b)
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_drop_duplicates():
# can't detect duplicates only from cached data
assert eq(d.a.drop_duplicates(), full.a.drop_duplicates())
assert eq(d.drop_duplicates(), full.drop_duplicates())
assert eq(d.index.drop_duplicates(), full.index.drop_duplicates())
def test_drop_duplicates_subset():
df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
'y': ['a', 'a', 'b', 'b', 'c', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
for kwarg in [{'keep': 'first'}, {'keep': 'last'}]:
assert eq(df.x.drop_duplicates(**kwarg),
ddf.x.drop_duplicates(**kwarg))
for ss in [['x'], 'y', ['x', 'y']]:
assert eq(df.drop_duplicates(subset=ss, **kwarg),
ddf.drop_duplicates(subset=ss, **kwarg))
def test_set_partition():
d2 = d.set_partition('b', [0, 2, 9])
assert d2.divisions == (0, 2, 9)
expected = full.set_index('b')
assert eq(d2, expected)
def test_set_partition_compute():
d2 = d.set_partition('b', [0, 2, 9])
d3 = d.set_partition('b', [0, 2, 9], compute=True)
assert eq(d2, d3)
assert eq(d2, full.set_index('b'))
assert eq(d3, full.set_index('b'))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_partition(d.b, [0, 2, 9])
d5 = d.set_partition(d.b, [0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert eq(d4, d5)
assert eq(d4, exp)
assert eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_get_partition():
pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
ddf = dd.from_pandas(pdf, 3)
assert ddf.divisions == (0, 4, 8, 9)
# DataFrame
div1 = ddf.get_partition(0)
assert isinstance(div1, dd.DataFrame)
assert eq(div1, pdf.loc[0:3])
div2 = ddf.get_partition(1)
assert eq(div2, pdf.loc[4:7])
div3 = ddf.get_partition(2)
assert eq(div3, pdf.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf)
# Series
div1 = ddf.a.get_partition(0)
assert isinstance(div1, dd.Series)
assert eq(div1, pdf.a.loc[0:3])
div2 = ddf.a.get_partition(1)
assert eq(div2, pdf.a.loc[4:7])
div3 = ddf.a.get_partition(2)
assert eq(div3, pdf.a.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf.a)
with tm.assertRaises(ValueError):
ddf.get_partition(-1)
with tm.assertRaises(ValueError):
ddf.get_partition(3)
def test_ndim():
assert (d.ndim == 2)
assert (d.a.ndim == 1)
assert (d.index.ndim == 1)
def test_dtype():
assert (d.dtypes == full.dtypes).all()
def test_cache():
d2 = d.cache()
assert all(task[0] == getitem for task in d2.dask.values())
assert eq(d2.a, d.a)
def test_value_counts():
df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
a = dd.from_pandas(df, npartitions=3)
result = a.x.value_counts()
expected = df.x.value_counts()
# because of a pandas bug, value_counts doesn't hold name (fixed in 0.17)
# https://github.com/pydata/pandas/pull/10419
assert eq(result, expected, check_names=False)
def test_unique():
pdf = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
'y': ['a', 'c', 'b', np.nan, 'c',
'b', 'a', 'd', np.nan, 'a']})
ddf = dd.from_pandas(pdf, npartitions=3)
assert eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name='x'))
assert eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name='y'))
def test_isin():
assert eq(d.a.isin([0, 1, 2]), full.a.isin([0, 1, 2]))
assert eq(d.a.isin(pd.Series([0, 1, 2])),
full.a.isin(pd.Series([0, 1, 2])))
def test_len():
assert len(d) == len(full)
assert len(d.a) == len(full.a)
def test_quantile():
# series / multiple
result = d.b.quantile([.3, .7])
exp = full.b.quantile([.3, .7])  # result may differ
assert len(result) == 2
assert result.divisions == (.3, .7)
assert eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == 0
assert 5 < result.iloc[1] < 6
# index
s = pd.Series(np.arange(10), index=np.arange(10))
ds = dd.from_pandas(s, 2)
result = ds.index.quantile([.3, .7])
exp = s.quantile([.3, .7])
assert len(result) == 2
assert result.divisions == (.3, .7)
assert eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert 1 < result.iloc[0] < 2
assert 7 < result.iloc[1] < 8
# series / single
result = d.b.quantile(.5)
exp = full.b.quantile(.5)  # result may differ
assert isinstance(result, dd.core.Scalar)
result = result.compute()
assert 4 < result < 6
def test_empty_quantile():
result = d.b.quantile([])
exp = full.b.quantile([])
assert result.divisions == (None, None)
# because of a pandas bug, name is not preserved
# https://github.com/pydata/pandas/pull/10881
assert result.name == 'b'
assert result.compute().name == 'b'
assert eq(result, exp, check_names=False)
def test_dataframe_quantile():
# column X is included to test column order and the result divisions
df = pd.DataFrame({'A': np.arange(20),
'X': np.arange(20, 40),
'B': np.arange(10, 30),
'C': ['a', 'b', 'c', 'd'] * 5},
columns=['A', 'X', 'B', 'C'])
ddf = dd.from_pandas(df, 3)
result = ddf.quantile()
assert result.npartitions == 1
assert result.divisions == ('A', 'X')
result = result.compute()
assert isinstance(result, pd.Series)
tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
result = ddf.quantile([0.25, 0.75])
assert result.npartitions == 1
assert result.divisions == (0.25, 0.75)
result = result.compute()
assert isinstance(result, pd.DataFrame)
tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result > minexp).all().all()
maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result < maxexp).all().all()
assert eq(ddf.quantile(axis=1), df.quantile(axis=1))
assert raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
assert eq(d.index, full.index)
def test_assign():
d_unknown = dd.from_pandas(full, npartitions=3, sort=False)
assert not d_unknown.known_divisions
res = d.assign(c=1,
d='string',
e=d.a.sum(),
f=d.a + d.b)
res_unknown = d_unknown.assign(c=1,
d='string',
e=d_unknown.a.sum(),
f=d_unknown.a + d_unknown.b)
sol = full.assign(c=1,
d='string',
e=full.a.sum(),
f=full.a + full.b)
assert eq(res, sol)
assert eq(res_unknown, sol)
res = d.assign(c=full.a + 1)
assert eq(res, full.assign(c=full.a + 1))
# divisions unknown won't work with pandas
with pytest.raises(ValueError):
d_unknown.assign(c=full.a + 1)
# unsupported type
with pytest.raises(TypeError):
d.assign(c=list(range(9)))
# Fails when assigning known divisions to unknown divisions
with pytest.raises(ValueError):
d_unknown.assign(foo=d.a)
# Fails when assigning unknown divisions to known divisions
with pytest.raises(ValueError):
d.assign(foo=d_unknown.a)
def test_map():
assert eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
lk = dict((v, v + 1) for v in full.a.values)
assert eq(d.a.map(lk), full.a.map(lk))
assert eq(d.b.map(lk), full.b.map(lk))
lk = pd.Series(lk)
assert eq(d.a.map(lk), full.a.map(lk))
assert eq(d.b.map(lk), full.b.map(lk))
assert eq(d.b.map(lk, meta=d.b), full.b.map(lk))
assert eq(d.b.map(lk, meta=('b', 'i8')), full.b.map(lk))
assert raises(TypeError, lambda: d.a.map(d.b))
def test_concat():
x = _concat([pd.DataFrame(columns=['a', 'b']),
pd.DataFrame(columns=['a', 'b'])])
assert list(x.columns) == ['a', 'b']
assert len(x) == 0
def test_args():
e = d.assign(c=d.a + 1)
f = type(e)(*e._args)
assert eq(e, f)
assert eq(d.a, type(d.a)(*d.a._args))
assert eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
assert d.known_divisions
df = dd.DataFrame(dsk, 'x', meta, divisions=[None, None, None])
assert not df.known_divisions
def test_unknown_divisions():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
d = dd.DataFrame(dsk, 'x', meta, [None, None, None, None])
full = d.compute(get=dask.get)
assert eq(d.a.sum(), full.a.sum())
assert eq(d.a + d.b + 1, full.a + full.b + 1)
def test_concat2():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
a = dd.DataFrame(dsk, 'x', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
b = dd.DataFrame(dsk, 'y', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
meta = make_meta({'b': 'i8', 'c': 'i8'})
c = dd.DataFrame(dsk, 'y', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60],
'd': [70, 80, 90]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10],
'd': [90, 80, 70]},
index=[3, 4, 5])}
meta = make_meta({'b': 'i8', 'c': 'i8', 'd': 'i8'},
index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'y', meta, [0, 3, 5])
cases = [[a, b], [a, c], [a, d]]
assert dd.concat([a]) is a
for case in cases:
result = dd.concat(case)
pdcase = [c.compute() for c in case]
assert result.npartitions == case[0].npartitions + case[1].npartitions
assert result.divisions == (None, ) * (result.npartitions + 1)
assert eq(pd.concat(pdcase), result)
assert result.dask == dd.concat(case).dask
result = dd.concat(case, join='inner')
assert result.npartitions == case[0].npartitions + case[1].npartitions
assert result.divisions == (None, ) * (result.npartitions + 1)
assert eq(pd.concat(pdcase, join='inner'), result)
assert result.dask == dd.concat(case, join='inner').dask
def test_concat3():
pdf1 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCDE'), index=list('abcdef'))
pdf2 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCFG'), index=list('ghijkl'))
pdf3 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCHI'), index=list('mnopqr'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
result = dd.concat([ddf1, ddf2])
assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions
assert result.npartitions == ddf1.npartitions + ddf2.npartitions
assert eq(result, pd.concat([pdf1, pdf2]))
assert eq(dd.concat([ddf1, ddf2], interleave_partitions=True),
pd.concat([pdf1, pdf2]))
result = dd.concat([ddf1, ddf2, ddf3])
assert result.divisions == (ddf1.divisions[:-1] + ddf2.divisions[:-1] +
ddf3.divisions)
assert result.npartitions == (ddf1.npartitions + ddf2.npartitions +
ddf3.npartitions)
assert eq(result, pd.concat([pdf1, pdf2, pdf3]))
assert eq(dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),
pd.concat([pdf1, pdf2, pdf3]))
def test_concat4_interleave_partitions():
pdf1 = pd.DataFrame(np.random.randn(10, 5),
columns=list('ABCDE'), index=list('abcdefghij'))
pdf2 = pd.DataFrame(np.random.randn(13, 5),
columns=list('ABCDE'), index=list('fghijklmnopqr'))
pdf3 = pd.DataFrame(np.random.randn(13, 6),
columns=list('CDEXYZ'), index=list('fghijklmnopqr'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
msg = ('All inputs have known divisions which cannot be '
'concatenated in order. Specify '
'interleave_partitions=True to ignore order')
cases = [[ddf1, ddf1], [ddf1, ddf2], [ddf1, ddf3], [ddf2, ddf1],
[ddf2, ddf3], [ddf3, ddf1], [ddf3, ddf2]]
for case in cases:
pdcase = [c.compute() for c in case]
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat(case)
assert eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
msg = "'join' must be 'inner' or 'outer'"
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat([ddf1, ddf1], join='invalid', interleave_partitions=True)
def test_concat5():
pdf1 = pd.DataFrame(np.random.randn(7, 5),
columns=list('ABCDE'), index=list('abcdefg'))
pdf2 = pd.DataFrame(np.random.randn(7, 6),
columns=list('FGHIJK'), index=list('abcdefg'))
pdf3 = pd.DataFrame(np.random.randn(7, 6),
columns=list('FGHIJK'), index=list('cdefghi'))
pdf4 = pd.DataFrame(np.random.randn(7, 5),
columns=list('FGHAB'), index=list('cdefghi'))
pdf5 = pd.DataFrame(np.random.randn(7, 5),
columns=list('FGHAB'), index=list('fklmnop'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
ddf4 = dd.from_pandas(pdf4, 2)
ddf5 = dd.from_pandas(pdf5, 3)
cases = [[ddf1, ddf2], [ddf1, ddf3], [ddf1, ddf4], [ddf1, ddf5],
[ddf3, ddf4], [ddf3, ddf5], [ddf5, ddf1, ddf4], [ddf5, ddf3],
[ddf1.A, ddf4.A], [ddf2.F, ddf3.F], [ddf4.A, ddf5.A],
[ddf1.A, ddf4.F], [ddf2.F, ddf3.H], [ddf4.A, ddf5.B],
[ddf1, ddf4.A], [ddf3.F, ddf2], [ddf5, ddf1.A, ddf2]]
for case in cases:
pdcase = [c.compute() for c in case]
assert eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
assert eq(dd.concat(case, axis=1, join='inner'),
pd.concat(pdcase, axis=1, join='inner'))
# Dask + pandas
cases = [[ddf1, pdf2], [ddf1, pdf3], [pdf1, ddf4],
[pdf1.A, ddf4.A], [ddf2.F, pdf3.F],
[ddf1, pdf4.A], [ddf3.F, pdf2], [ddf2, pdf1, ddf3.F]]
for case in cases:
pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]
assert eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
assert eq(dd.concat(case, axis=1, join='inner'),
pd.concat(pdcase, axis=1, join='inner'))
def test_append():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]})
df2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]},
index=[6, 7, 8, 9, 10, 11])
df3 = pd.DataFrame({'b': [1, 2, 3, 4, 5, 6],
'c': [1, 2, 3, 4, 5, 6]},
index=[6, 7, 8, 9, 10, 11])
ddf = dd.from_pandas(df, 2)
ddf2 = dd.from_pandas(df2, 2)
ddf3 = dd.from_pandas(df3, 2)
s = pd.Series([7, 8], name=6, index=['a', 'b'])
assert eq(ddf.append(s), df.append(s))
assert eq(ddf.append(ddf2), df.append(df2))
assert eq(ddf.a.append(ddf2.a), df.a.append(df2.a))
# different columns
assert eq(ddf.append(ddf3), df.append(df3))
assert eq(ddf.a.append(ddf3.b), df.a.append(df3.b))
# dask + pandas
assert eq(ddf.append(df2), df.append(df2))
assert eq(ddf.a.append(df2.a), df.a.append(df2.a))
assert eq(ddf.append(df3), df.append(df3))
assert eq(ddf.a.append(df3.b), df.a.append(df3.b))
df4 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]},
index=[4, 5, 6, 7, 8, 9])
ddf4 = dd.from_pandas(df4, 2)
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
with tm.assertRaisesRegexp(ValueError, msg):
ddf.append(ddf4)
def test_append2():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
ddf1 = dd.DataFrame(dsk, 'x', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
ddf2 = dd.DataFrame(dsk, 'y', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
meta = make_meta({'b': 'i8', 'c': 'i8'})
ddf3 = dd.DataFrame(dsk, 'y', meta, [None, None])
assert eq(ddf1.append(ddf2), ddf1.compute().append(ddf2.compute()))
assert eq(ddf2.append(ddf1), ddf2.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf2), ddf1.a.compute().append(ddf2.compute()))
assert eq(ddf2.a.append(ddf1), ddf2.a.compute().append(ddf1.compute()))
# different columns
assert eq(ddf1.append(ddf3), ddf1.compute().append(ddf3.compute()))
assert eq(ddf3.append(ddf1), ddf3.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf3), ddf1.a.compute().append(ddf3.compute()))
assert eq(ddf3.b.append(ddf1), ddf3.b.compute().append(ddf1.compute()))
# Dask + pandas
assert eq(ddf1.append(ddf2.compute()), ddf1.compute().append(ddf2.compute()))
assert eq(ddf2.append(ddf1.compute()), ddf2.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf2.compute()), ddf1.a.compute().append(ddf2.compute()))
assert eq(ddf2.a.append(ddf1.compute()), ddf2.a.compute().append(ddf1.compute()))
# different columns
assert eq(ddf1.append(ddf3.compute()), ddf1.compute().append(ddf3.compute()))
assert eq(ddf3.append(ddf1.compute()), ddf3.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf3.compute()), ddf1.a.compute().append(ddf3.compute()))
assert eq(ddf3.b.append(ddf1.compute()), ddf3.b.compute().append(ddf1.compute()))
def test_dataframe_picklable():
from pickle import loads, dumps
cloudpickle = pytest.importorskip('cloudpickle')
cp_dumps = cloudpickle.dumps
d = tm.makeTimeDataFrame()
df = dd.from_pandas(d, npartitions=3)
df = df + 2
# dataframe
df2 = loads(dumps(df))
assert eq(df, df2)
df2 = loads(cp_dumps(df))
assert eq(df, df2)
# series
a2 = loads(dumps(df.A))
assert eq(df.A, a2)
a2 = loads(cp_dumps(df.A))
assert eq(df.A, a2)
# index
i2 = loads(dumps(df.index))
assert eq(df.index, i2)
i2 = loads(cp_dumps(df.index))
assert eq(df.index, i2)
# scalar
# lambdas are present, so only test cloudpickle
s = df.A.sum()
s2 = loads(cp_dumps(s))
assert eq(s, s2)
def test_random_partitions():
a, b = d.random_split([0.5, 0.5])
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert len(a.compute()) + len(b.compute()) == len(full)
def test_series_nunique():
ps = pd.Series(list('aaabbccccdddeee'), name='a')
s = dd.from_pandas(ps, npartitions=3)
assert eq(s.nunique(), ps.nunique())
def test_set_partition_2():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
ddf = dd.from_pandas(df, 2)
result = ddf.set_partition('y', ['a', 'c', 'd'])
assert result.divisions == ('a', 'c', 'd')
assert list(result.compute(get=get_sync).index[-2:]) == ['d', 'd']
@pytest.mark.slow
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith('repartition-split')]
keys = sorted(keys)
sp = pd.concat([d._get(d.dask, k) for k in keys])
assert eq(orig, sp)
assert eq(orig, d)
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert eq(a, b)
assert eq(a._get(b.dask, (b._name, 0)), df.iloc[:1])
for div in [[20, 60], [10, 50], [1], # first / last element mismatch
[0, 60], [10, 70], # do not allow to expand divisions by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60]]: # not unique (last element can be duplicated)
assert raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert eq(ddf, pdf)
for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
[0, 2, 6], [0, 2, 6, 6],
[0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list('abcdefghij'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert eq(ddf, pdf)
for div in [list('aj'), list('ajj'), list('adj'),
list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
list('abdefgij'), list('abcdefghij')]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
# expand divisions
for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
def test_repartition_divisions():
result = repartition_divisions([0, 6], [0, 6, 6], 'a', 'b', 'c')
assert result == {('b', 0): (_loc, ('a', 0), 0, 6, False),
('b', 1): (_loc, ('a', 0), 6, 6, True),
('c', 0): ('b', 0),
('c', 1): ('b', 1)}
result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')
assert result == {('b', 0): (_loc, ('a', 0), 1, 3, False),
('b', 1): (_loc, ('a', 1), 3, 4, False),
('b', 2): (_loc, ('a', 1), 4, 6, False),
('b', 3): (_loc, ('a', 1), 6, 7, True),
('c', 0): (pd.concat, (list, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
def test_repartition_on_pandas_dataframe():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.DataFrame)
assert ddf.divisions == (10, 20, 50, 60)
assert eq(ddf, df)
ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.Series)
assert ddf.divisions == (10, 20, 50, 60)
assert eq(ddf, df.y)
def test_repartition_npartitions():
for use_index in (True, False):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
for n in [1, 2, 4, 5]:
for k in [1, 2, 4, 5]:
if k > n:
continue
a = dd.from_pandas(df, npartitions=n, sort=use_index)
k = min(a.npartitions, k)
b = a.repartition(npartitions=k)
eq(a, b)
assert b.npartitions == k
a = dd.from_pandas(df, npartitions=1)
with pytest.raises(ValueError):
a.repartition(npartitions=5)
def test_embarrassingly_parallel_operations():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
assert eq(a.x.astype('float32'), df.x.astype('float32'))
assert a.x.astype('float32').compute().dtype == 'float32'
assert eq(a.x.dropna(), df.x.dropna())
assert eq(a.x.fillna(100), df.x.fillna(100))
assert eq(a.fillna(100), df.fillna(100))
assert eq(a.x.between(2, 4), df.x.between(2, 4))
assert eq(a.x.clip(2, 4), df.x.clip(2, 4))
assert eq(a.x.notnull(), df.x.notnull())
assert eq(a.x.isnull(), df.x.isnull())
assert eq(a.notnull(), df.notnull())
assert eq(a.isnull(), df.isnull())
assert len(a.sample(0.5).compute()) < len(df)
def test_sample():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(0.5)
assert eq(b, b)
c = a.sample(0.5, random_state=1234)
d = a.sample(0.5, random_state=1234)
assert eq(c, d)
assert a.sample(0.5)._name != a.sample(0.5)._name
def test_sample_without_replacement():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(0.7, replace=False)
bb = b.index.compute()
assert len(bb) == len(set(bb))
def test_datetime_accessor():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df['x'] = df.x.astype('M8[us]')
a = dd.from_pandas(df, 2)
assert 'date' in dir(a.x.dt)
# pandas loses Series.name via datetime accessor
# see https://github.com/pydata/pandas/issues/10712
assert eq(a.x.dt.date, df.x.dt.date, check_names=False)
assert (a.x.dt.to_pydatetime().compute() == df.x.dt.to_pydatetime()).all()
assert a.x.dt.date.dask == a.x.dt.date.dask
assert a.x.dt.to_pydatetime().dask == a.x.dt.to_pydatetime().dask
def test_str_accessor():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'D']})
a = dd.from_pandas(df, 2)
assert 'upper' in dir(a.x.str)
assert eq(a.x.str.upper(), df.x.str.upper())
assert a.x.str.upper().dask == a.x.str.upper().dask
def test_empty_max():
meta = make_meta({'x': 'i8'})
a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
('x', 1): pd.DataFrame({'x': []})}, 'x',
meta, [None, None, None])
assert eq(a.x.max(), 1)
def test_nlargest_series():
s = pd.Series([1, 3, 5, 2, 4, 6])
ss = dd.from_pandas(s, npartitions=2)
assert eq(ss.nlargest(2), s.nlargest(2))
def test_query():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
q = a.query('x**2 > y')
with ignoring(ImportError):
assert eq(q, df.query('x**2 > y'))
def test_eval():
p = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
d = dd.from_pandas(p, npartitions=2)
with ignoring(ImportError):
assert eq(p.eval('x + y'), d.eval('x + y'))
assert eq(p.eval('z = x + y', inplace=False),
d.eval('z = x + y', inplace=False))
with pytest.raises(NotImplementedError):
d.eval('z = x + y', inplace=True)
if p.eval('z = x + y', inplace=None) is None:
with pytest.raises(NotImplementedError):
d.eval('z = x + y', inplace=None)
@pytest.mark.parametrize('include, exclude', [
([int], None),
(None, [int]),
([np.number, object], [float]),
(['datetime'], None)
])
def test_select_dtypes(include, exclude):
n = 10
df = pd.DataFrame({'cint': [1] * n,
'cstr': ['a'] * n,
'cfloat': [1.] * n,
'cdt': pd.date_range('2016-01-01', periods=n)
})
a = dd.from_pandas(df, npartitions=2)
result = a.select_dtypes(include=include, exclude=exclude)
expected = df.select_dtypes(include=include, exclude=exclude)
assert eq(result, expected)
def test_deterministic_arithmetic_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted((a.x + a.y ** 2).dask) == sorted((a.x + a.y ** 2).dask)
assert sorted((a.x + a.y ** 2).dask) != sorted((a.x + a.y ** 3).dask)
assert sorted((a.x + a.y ** 2).dask) != sorted((a.x - a.y ** 2).dask)
def test_deterministic_reduction_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert a.x.sum()._name == a.x.sum()._name
assert a.x.mean()._name == a.x.mean()._name
assert a.x.var()._name == a.x.var()._name
assert a.x.min()._name == a.x.min()._name
assert a.x.max()._name == a.x.max()._name
assert a.x.count()._name == a.x.count()._name
def test_deterministic_apply_concat_apply_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
assert (sorted(a.x.drop_duplicates().dask) ==
sorted(a.x.drop_duplicates().dask))
assert (sorted(a.groupby('x').y.mean().dask) ==
sorted(a.groupby('x').y.mean().dask))
# Test aca without passing in token string
f = lambda a: a.nlargest(5)
f2 = lambda a: a.nlargest(3)
assert (sorted(aca(a.x, f, f, a.x._meta).dask) !=
sorted(aca(a.x, f2, f2, a.x._meta).dask))
assert (sorted(aca(a.x, f, f, a.x._meta).dask) ==
sorted(aca(a.x, f, f, a.x._meta).dask))
# Test aca with keywords
def chunk(x, c_key=0, both_key=0):
return x.sum() + c_key + both_key
def agg(x, a_key=0, both_key=0):
return pd.Series(x).sum() + a_key + both_key
c_key = 2
a_key = 3
both_key = 4
res = aca(a.x, chunk=chunk, aggregate=agg, chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key}, both_key=both_key)
assert (sorted(res.dask) ==
sorted(aca(a.x, chunk=chunk, aggregate=agg,
chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key},
both_key=both_key).dask))
assert (sorted(res.dask) !=
sorted(aca(a.x, chunk=chunk, aggregate=agg,
chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key},
both_key=0).dask))
assert eq(res, df.x.sum() + 2*(c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
def chunk(x, y, constant=1.0):
return (x + y + constant).head()
def agg(x):
return x.head()
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg,
chunk_kwargs=dict(constant=2.0))
sol = (df + 2.0 + 2.0).head()
assert eq(res, sol)
# Should infer as a scalar
res = aca([ddf.x], chunk=lambda x: pd.Series([x.sum()]),
aggregate=lambda x: x.sum())
assert isinstance(res, Scalar)
assert res.compute() == df.x.sum()
def test_reduction_method():
df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
chunk = lambda x, val=0: (x >= val).sum()
agg = lambda x: x.sum()
# Output of chunk is a scalar
res = ddf.x.reduction(chunk, aggregate=agg)
assert eq(res, df.x.count())
# Output of chunk is a series
res = ddf.reduction(chunk, aggregate=agg)
assert res._name == ddf.reduction(chunk, aggregate=agg)._name
assert eq(res, df.count())
# Test with keywords
res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={'val': 25})
assert res2._name == ddf.reduction(chunk, aggregate=agg,
chunk_kwargs={'val': 25})._name
assert res2._name != res._name
assert eq(res2, (df >= 25).sum())
# Output of chunk is a dataframe
def sum_and_count(x):
return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
res = ddf.reduction(sum_and_count,
aggregate=lambda x: x.groupby(level=0).sum())
assert eq(res, pd.DataFrame({'sum': df.sum(), 'count': df.count()}))
def test_gh_517():
arr = np.random.randn(100, 2)
df = pd.DataFrame(arr, columns=['a', 'b'])
ddf = dd.from_pandas(df, 2)
assert ddf.index.nunique().compute() == 100
ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert eq(a.drop('y', axis=1), df.drop('y', axis=1))
def test_gh580():
df = pd.DataFrame({'x': np.arange(10, dtype=float)})
ddf = dd.from_pandas(df, 2)
assert eq(np.cos(df['x']), np.cos(ddf['x']))
assert eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
renamer = {'a': 'A', 'b': 'B'}
assert eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_function():
renamer = lambda x: x.upper()
assert eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_index():
renamer = {0: 1}
assert raises(ValueError, lambda: d.rename(index=renamer))
def test_to_frame():
s = pd.Series([1, 2, 3], name='foo')
a = dd.from_pandas(s, npartitions=2)
assert eq(s.to_frame(), a.to_frame())
assert eq(s.to_frame('bar'), a.to_frame('bar'))
def test_apply():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row['x'] + row['y']
assert eq(ddf.x.apply(lambda x: x + 1),
df.x.apply(lambda x: x + 1))
# specify columns
assert eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1, columns=None),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
assert eq(ddf.apply(lambda xy: xy[0] + xy[1], axis='columns', columns=None),
df.apply(lambda xy: xy[0] + xy[1], axis='columns'))
# inference
assert eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
assert eq(ddf.apply(lambda xy: xy, axis=1),
df.apply(lambda xy: xy, axis=1))
# result will be a DataFrame
func = lambda x: pd.Series([x, x])
assert eq(ddf.x.apply(func, name=[0, 1]), df.x.apply(func))
# inference
assert eq(ddf.x.apply(func), df.x.apply(func))
# axis=0
with tm.assertRaises(NotImplementedError):
ddf.apply(lambda xy: xy, axis=0)
with tm.assertRaises(NotImplementedError):
ddf.apply(lambda xy: xy, axis='index')
def test_cov():
df = pd.util.testing.makeMissingDataframe(0.3, 42)
ddf = dd.from_pandas(df, npartitions=3)
assert eq(ddf.cov(), df.cov())
assert eq(ddf.cov(10), df.cov(10))
assert ddf.cov()._name == ddf.cov()._name
assert ddf.cov(10)._name != ddf.cov()._name
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=3)
db = dd.from_pandas(b, npartitions=4)
assert eq(da.cov(db), a.cov(b))
assert eq(da.cov(db, 10), a.cov(b, 10))
assert da.cov(db)._name == da.cov(db)._name
assert da.cov(db, 10)._name != da.cov(db)._name
def test_corr():
df = pd.util.testing.makeMissingDataframe(0.3, 42)
ddf = dd.from_pandas(df, npartitions=3)
assert eq(ddf.corr(), df.corr())
assert eq(ddf.corr(min_periods=10), df.corr(min_periods=10))
assert ddf.corr()._name == ddf.corr()._name
assert ddf.corr(min_periods=10)._name != ddf.corr()._name
pytest.raises(NotImplementedError, lambda: ddf.corr(method='spearman'))
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=3)
db = dd.from_pandas(b, npartitions=4)
assert eq(da.corr(db), a.corr(b))
assert eq(da.corr(db, min_periods=10), a.corr(b, min_periods=10))
assert da.corr(db)._name == da.corr(db)._name
assert da.corr(db, min_periods=10)._name != da.corr(db)._name
pytest.raises(NotImplementedError, lambda: da.corr(db, method='spearman'))
pytest.raises(TypeError, lambda: da.corr(ddf))
def test_cov_corr_meta():
df = pd.DataFrame({'a': np.array([1, 2, 3]),
'b': np.array([1.0, 2.0, 3.0], dtype='f4'),
'c': np.array([1.0, 2.0, 3.0])},
index=pd.Index([1, 2, 3], name='myindex'))
ddf = dd.from_pandas(df, npartitions=2)
eq(ddf.corr(), df.corr())
eq(ddf.cov(), df.cov())
assert ddf.a.cov(ddf.b)._meta.dtype == 'f8'
assert ddf.a.corr(ddf.b)._meta.dtype == 'f8'
@pytest.mark.slow
def test_cov_corr_stable():
df = pd.DataFrame(np.random.random((20000000, 2)) * 2 - 1, columns=['a', 'b'])
ddf = dd.from_pandas(df, npartitions=50)
assert eq(ddf.cov(), df.cov())
assert eq(ddf.corr(), df.corr())
def test_apply_infer_columns():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
def return_df(x):
# will create a new DataFrame whose columns are ['sum', 'mean']
return pd.Series([x.sum(), x.mean()], index=['sum', 'mean'])
# DataFrame to completely different DataFrame
result = ddf.apply(return_df, axis=1)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(['sum', 'mean']))
assert eq(result, df.apply(return_df, axis=1))
# DataFrame to Series
result = ddf.apply(lambda x: 1, axis=1)
assert isinstance(result, dd.Series)
assert result.name is None
assert eq(result, df.apply(lambda x: 1, axis=1))
def return_df2(x):
return pd.Series([x * 2, x * 3], index=['x2', 'x3'])
# Series to completely different DataFrame
result = ddf.x.apply(return_df2)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(['x2', 'x3']))
assert eq(result, df.x.apply(return_df2))
# Series to Series
result = ddf.x.apply(lambda x: 1)
assert isinstance(result, dd.Series)
assert result.name == 'x'
assert eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
i = tm.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert (i.index.day == a.index.day.compute()).all()
assert (i.index.month == a.index.month.compute()).all()
def test_nlargest():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
ddf = dd.from_pandas(df, npartitions=2)
res = ddf.nlargest(5, 'a')
exp = df.nlargest(5, 'a')
eq(res, exp)
def test_nlargest_multiple_columns():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
ddf = dd.from_pandas(df, npartitions=2)
result = ddf.nlargest(5, ['a', 'b'])
expected = df.nlargest(5, ['a', 'b'])
eq(result, expected)
def test_reset_index():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
res = ddf.reset_index()
exp = df.reset_index()
assert len(res.index.compute()) == len(exp.index)
tm.assert_index_equal(res.columns, exp.columns)
tm.assert_numpy_array_equal(res.compute().values, exp.values)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_series_iteritems():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df['x'].iteritems(), ddf['x'].iteritems()):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
def test_from_delayed():
dfs = [delayed(tm.makeTimeDataFrame)(i) for i in range(1, 5)]
meta = dfs[0].compute()
df = dd.from_delayed(dfs, meta=meta)
assert (df.compute().columns == df.columns).all()
f = lambda x: pd.Series([len(x)])
assert list(df.map_partitions(f).compute()) == [1, 2, 3, 4]
ss = [df.A for df in dfs]
s = dd.from_delayed(ss, meta=meta.A)
assert s.compute().name == s.name
assert list(s.map_partitions(f).compute()) == [1, 2, 3, 4]
def test_from_delayed_sorted():
a = pd.DataFrame({'x': [1, 2]}, index=[1, 10])
b = pd.DataFrame({'x': [4, 1]}, index=[100, 200])
A = dd.from_delayed([delayed(a), delayed(b)], divisions='sorted')
assert A.known_divisions
assert A.divisions == (1, 100, 200)
def test_to_delayed():
from dask.delayed import Delayed
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
a, b = ddf.to_delayed()
assert isinstance(a, Delayed)
assert isinstance(b, Delayed)
assert eq(a.compute(), df.iloc[:2])
def test_astype():
df = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[10, 20, 30, 40])
a = dd.from_pandas(df, 2)
assert eq(a.astype(float), df.astype(float))
assert eq(a.x.astype(float), df.x.astype(float))
def test_groupby_callable():
a = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[1, 2, 3, 4])
b = dd.from_pandas(a, 2)
def iseven(x):
return x % 2 == 0
assert eq(a.groupby(iseven).y.sum(),
b.groupby(iseven).y.sum())
assert eq(a.y.groupby(iseven).sum(),
b.y.groupby(iseven).sum())
def test_set_index_sorted_true():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [10, 20, 30, 40],
'z': [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index('x', sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
eq(a.set_index('x', drop=drop),
df.set_index('x', drop=drop))
eq(a.set_index(a.x, sorted=True, drop=drop),
df.set_index(df.x, drop=drop))
eq(a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop))
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_compute_divisions():
from dask.dataframe.core import compute_divisions
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [10, 20, 30, 40],
'z': [4, 3, 2, 1]},
index=[1, 3, 10, 20])
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = compute_divisions(a)
eq(a, b)
assert b.known_divisions
def test_methods_tokenize_differently():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df = dd.from_pandas(df, npartitions=1)
assert (df.x.map_partitions(lambda x: pd.Series(x.min()))._name !=
df.x.map_partitions(lambda x: pd.Series(x.max()))._name)
def test_sorted_index_single_partition():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
eq(ddf.set_index('x', sorted=True),
df.set_index('x'))
def test_info():
from io import StringIO
from dask.compatibility import unicode
# TODO This should be fixed in pandas 0.18.2
if pd.__version__ == '0.18.0':
from pandas.core import format
else:
from pandas.formats import format
format._put_lines = put_lines
test_frames = [
pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=pd.Int64Index(range(4))), # No RangeIndex in dask
pd.DataFrame()
]
for df in test_frames:
buf_pd, buf_da = StringIO(), StringIO()
ddf = dd.from_pandas(df, npartitions=4)
df.info(buf=buf_pd)
ddf.info(buf=buf_da, verbose=True, memory_usage=True)
stdout_pd = buf_pd.getvalue()
stdout_da = buf_da.getvalue()
stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
assert stdout_pd == stdout_da
buf = StringIO()
ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=range(4)), npartitions=4)
# Verbose=False
ddf.info(buf=buf, verbose=False)
assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Data columns (total 2 columns):\n"
"x int64\n"
"y int64\n"
"dtypes: int64(2)")
# buf=None
assert ddf.info(buf=None) is None
def test_gh_1301():
df = pd.DataFrame([['1', '2'], ['3', '4']])
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.assign(y=ddf[1].astype(int))
eq(ddf2,
df.assign(y=df[1].astype(int)))
assert ddf2.dtypes['y'] == np.dtype(int)
def test_timeseries_sorted():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df.reset_index(), npartitions=2)
df.index.name = 'index'
eq(ddf.set_index('index', sorted=True, drop=True),
df)
def test_column_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
from copy import copy
orig = copy(ddf)
ddf['z'] = ddf.x + ddf.y
df['z'] = df.x + df.y
eq(df, ddf)
assert 'z' not in orig.columns
def test_columns_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df2 = df.assign(y=df.x + 1, z=df.x - 1)
df[['a', 'b']] = df2[['y', 'z']]
ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
ddf[['a', 'b']] = ddf2[['y', 'z']]
eq(df, ddf)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("idx", [
np.arange(100),
sorted(np.random.random(size=100)),
pd.date_range('20150101', periods=100)
])
def test_idxmaxmin(idx, skipna):
pdf = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'), index=idx)
pdf.b.iloc[31] = np.nan
pdf.d.iloc[78] = np.nan
ddf = dd.from_pandas(pdf, npartitions=3)
assert eq(pdf.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))
assert eq(pdf.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))
assert eq(pdf.idxmax(axis=1, skipna=skipna), ddf.idxmax(axis=1, skipna=skipna))
assert eq(pdf.idxmin(axis=1, skipna=skipna), ddf.idxmin(axis=1, skipna=skipna))
assert eq(pdf.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
assert eq(pdf.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
def test_getitem_meta():
data = {'col1': ['a', 'a', 'b'],
'col2': [0, 1, 0]}
df = pd.DataFrame(data=data, columns=['col1', 'col2'])
ddf = dd.from_pandas(df, npartitions=1)
eq(df.col2[df.col1 == 'a'],
ddf.col2[ddf.col1 == 'a'])
| bsd-3-clause |
DGrady/pandas | doc/sphinxext/ipython_sphinxext/ipython_directive.py | 5 | 37811 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython output prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, text_type, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
try:
from traitlets.config import Config
except ImportError:
from IPython import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
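# Illustrative sketch of block_parser output (comments only, not executed).
# Assuming the default In/Out prompt patterns, a part such as
#
#     # squared value
#     In [2]: print(y)
#     1
#
# is parsed into roughly
#
#     [(COMMENT, '# squared value'), (INPUT, (None, 'print(y)', '1'))]
#
# where the trailing '1' is captured as the REST (stdout echoed by the input line).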
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
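# Usage sketch (illustrative only) of how this buffer behaves; the shell below
# routes its captured stdout/stderr through an instance of this class:
#
#     buf = DecodingStringIO(encodings=('utf8', 'latin1'))
#     buf.write(u'already text')   # text is written unchanged
#     buf.write(b'caf\xc3\xa9')    # bytes are decoded with the first encoding that works
#     buf.getvalue()               # -> u'already textcaf\xe9'
#
# Bytes that none of the configured encodings can decode fall back to a lenient
# utf8 decode with replacement characters.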
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
# based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
        content is a list of strings; it is the unedited directive content.
        This runs it line by line in the InteractiveShell, prepending
        prompts as needed and capturing stderr and stdout, then returns
        the content as a list as if it were IPython code.
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
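    # Illustrative sketch (not part of the original source): for directive
    # content written as plain Python, e.g.
    #     a = 2
    #     a + 1
    # process_pure_python re-emits the same lines prefixed with IPython-style
    # "In [n]:" prompts (and "   ...:" continuations for multi-line statements),
    # so that the result can be fed back through the regular ipython block parser.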
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend and 'matplotlib.backends' not in sys.modules:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if self.state.document.current_source not in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
try:
self.shell.IP.prompt_manager.width = 0
except AttributeError:
                # GH14003: the PromptManager class was removed after IPython 5.x
pass
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
                         re.compile(r'In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
                         re.compile(r'Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
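# A minimal, hypothetical sketch of how a Sphinx project might enable this
# directive in its conf.py (not part of the original source; the entry in
# `extensions` depends on where this module is installed, and the values shown
# are illustrative, not required):
#
#     extensions = ['ipython_directive']
#     ipython_savefig_dir = '_static'
#     ipython_promptin = 'In [%d]:'
#     ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']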
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
allotria/intellij-community | python/helpers/pycharm_display/datalore/display/supported_data_type.py | 14 | 2135 | import json
from abc import abstractmethod
from datetime import datetime
try:
import numpy
except ImportError:
numpy = None
try:
import pandas
except ImportError:
pandas = None
# Parameter 'value' can also be pandas.DataFrame
def _standardize_dict(value):
result = {}
for k, v in value.items():
result[_standardize_value(k)] = _standardize_value(v)
return result
def is_int(v):
return isinstance(v, int) or (numpy and isinstance(v, numpy.integer))
def is_float(v):
return isinstance(v, float) or (numpy and isinstance(v, numpy.floating))
def is_number(v):
return is_int(v) or is_float(v)
def is_shapely_geometry(v):
try:
from shapely.geometry.base import BaseGeometry
return isinstance(v, BaseGeometry)
except ImportError:
return False
def _standardize_value(v):
if v is None:
return v
if isinstance(v, bool):
return bool(v)
if is_int(v):
return int(v)
if isinstance(v, str):
return str(v)
if is_float(v):
return float(v)
if isinstance(v, dict) or (pandas and isinstance(v, pandas.DataFrame)):
return _standardize_dict(v)
if isinstance(v, list):
return [_standardize_value(elem) for elem in v]
if isinstance(v, tuple):
return tuple(_standardize_value(elem) for elem in v)
if (numpy and isinstance(v, numpy.ndarray)) or (pandas and isinstance(v, pandas.Series)):
return _standardize_value(v.tolist())
if isinstance(v, datetime):
return v.timestamp() * 1000 # convert from second to millisecond
if isinstance(v, CanToDataFrame):
return _standardize_dict(v.to_data_frame())
if is_shapely_geometry(v):
from shapely.geometry import mapping
return json.dumps(mapping(v))
try:
return repr(v)
except Exception as e:
        # TODO: this needs a test case; also, the exception should be logged somewhere
raise Exception('Unsupported type: {0}({1})'.format(v, type(v)))
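# A minimal usage sketch (not part of the original module, and not called
# anywhere in it): _standardize_value/_standardize_dict recursively convert
# numpy scalars and nested containers into plain Python types so the result
# can be serialized with json. The function name and the sample values below
# are hypothetical and purely illustrative.
def _example_standardize_usage():
    sample = {'count': numpy.int64(3) if numpy else 3,  # numpy scalar -> int
              'ratio': 0.5,                             # float stays float
              'tags': ['a', 'b']}                       # list handled element-wise
    return json.dumps(_standardize_dict(sample))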
class CanToDataFrame:
@abstractmethod
def to_data_frame(self): # -> pandas.DataFrame
pass
| apache-2.0 |
luca-penasa/cyclopy | cyclopy/tests/SSATestAS2006Figures.py | 1 | 1924 | # -*- coding: utf-8 -*-
"""
Created on Thu May 31 18:45:25 2012
@author: luca
"""
from __future__ import division
import SSA as SSA #import ssa methods
import numpy as np
from matplotlib.pyplot import axis, errorbar, close, plot, interactive, subplot, figure, title, xlabel, ylabel, grid, semilogy, hold, legend
interactive(True)
#load mratest.dat
mratest = np.loadtxt("mratest.dat")
x = mratest[:,0]
close('all') #close all
N = 300
#---------------------------------------------------------------
# Recreate Figure 1: the test signal alone and with noise added.
#---------------------------------------------------------------
figure()
plot(x, 'r', label="signal with noise")
hold (1)
plot(mratest[:,2], 'y', label="signal alone")
title('Figure 1: Test signal alone, and with added noise.')
legend()
#---------------------------------------------------------------
# Recreate Figure 3:
# This differs slightly from Allen and Smith's because I use the
# estimated AR(1) parameters rather than the known parameters.
#---------------------------------------------------------------
ssa = SSA.SSA(x)
ssa.setEmbeddingDimension(40)
ssa.setNumberOfMonteCarloSimulations(N)
ssa.update()
ssa.doMonteCarloSimulations()
c = ssa.getConfidence()
figure()
semilogy(ssa.Val_, '+r')
semilogy(c, 'c')
title('Figure 3: Eigenspectrum of test series and surrogate projections.')
#---------------------------------------------------------------
# Recreate Figure 4:
# This differs slightly from Allen and Smith's because I use the
# estimated AR(1) parameters rather than the known parameters.
#---------------------------------------------------------------
mE, fE = SSA.EOFDominantFrequencies(ssa.Eig_)
figure()
cm = np.mean(c, axis = 1)
semilogy (fE, ssa.Val_, '+r')
err = np.abs(c[:,0] - c[:,1]) / 2
errorbar (fE, cm, yerr=[c[:,1] - cm, cm - c[:,0]], linestyle='.')
axis([0, 0.5, 0.05, 15])
title("Fig 4")
| gpl-2.0 |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
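    # Illustrative note (not part of the original source): the chunking above is
    # driven by the user-configurable rcParam 'agg.path.chunksize', e.g.
    #     rcParams['agg.path.chunksize'] = 20000   # value shown is illustrative
    # Paths with more vertices than this are split into pieces before being
    # handed to the C++ Agg renderer.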
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
# passing rgb is a little hack to make cacheing in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
        Get the font for text instance t, caching for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
        convert point measures to pixels using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| agpl-3.0 |
zfrenchee/pandas | pandas/tests/test_sorting.py | 4 | 17593 | import pytest
from itertools import product
from collections import defaultdict
import warnings
from datetime import datetime
import numpy as np
from numpy import nan
from pandas.core import common as com
from pandas import (DataFrame, MultiIndex, merge, concat, Series, compat,
_np_version_under1p10)
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.sorting import (is_int64_overflow_possible,
decons_group_index,
get_group_index,
nargsort,
lexsort_indexer,
safe_sort)
class TestSorting(object):
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com._asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict(
{'a': values, 'b': values, 'c': values, 'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
try:
# GH 2785; due to a regression in NumPy1.6.2
np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))
np.argsort(items2, kind='mergesort')
except TypeError:
pytest.skip('requested sort not available for type')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge(object):
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
assert len(out) == len(left)
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7))
.astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notna(),
'right': out['right'].notna(),
'inner': out['left'].notna() & out['right'].notna(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == 'outer'
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_decons():
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5)]
testit(label_list, shape)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype='object')
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer_from_list(self):
values = ['b', 1, 0, 'a', 0, 'b']
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
if compat.PY2 and not _np_version_under1p10:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
pytest.raises(TypeError, safe_sort, arr)
else:
pytest.raises(TypeError, safe_sort, arr)
def test_exceptions(self):
with tm.assert_raises_regex(TypeError,
"Only list-like objects are allowed"):
safe_sort(values=1)
with tm.assert_raises_regex(TypeError,
"Only list-like objects or None"):
safe_sort(values=[0, 1, 2], labels=1)
with tm.assert_raises_regex(ValueError,
"values should be unique"):
safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
| bsd-3-clause |
architecture-building-systems/CityEnergyAnalyst | cea/utilities/dbf.py | 1 | 3527 | """
A collection of utility functions for working with ``*.DBF`` (dBase database) files.
"""
import numpy as np
import pandas as pd
import os
import cea.config
# import PySAL without the warning
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import pysal
__author__ = "Clayton Miller"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Clayton Miller", "Jimeno A. Fonseca", "Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
TYPE_MAPPING = {
int: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int64: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
str: ('C', 25, 0),
np.bool_: ('L', 1, 0)}
def dataframe_to_dbf(df, dbf_path, specs=None):
"""Given a pandas Dataframe, write a dbase database to ``dbf_path``.
:type df: pandas.Dataframe
:type dbf_path: str
:param specs: A list of column specifications for the dbase table. Each column is specified by a tuple (datatype,
size, decimal) - we support ``datatype in ('N', 'C')`` for strings, integers and floating point numbers, if
no specs are provided (see ``TYPE_MAPPING``)
:type specs: list[tuple(str, int, int)]
"""
if specs is None:
types = [type(df[i].iloc[0]) for i in df.columns]
specs = [TYPE_MAPPING[t] for t in types]
# handle case of strings that are longer than 25 characters (e.g. for the "Name" column)
for i in range(len(specs)):
t, l, d = specs[i] # type, length, decimals
if t == 'C':
l = max(l, df[df.columns[i]].apply(len).max())
specs[i] = t, l, d
dbf = pysal.lib.io.open(dbf_path, 'w', 'dbf')
dbf.header = list(df.columns)
dbf.field_spec = specs
for row in range(len(df)):
dbf.write(df.iloc[row])
dbf.close()
return dbf_path
def dbf_to_dataframe(dbf_path, index=None, cols=None, include_index=False):
dbf = pysal.lib.io.open(dbf_path)
if cols:
if include_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = dbf.header
data = dict([(var, dbf.by_col(var)) for var in vars_to_read])
if index:
index = dbf.by_col(index)
dbf.close()
return pd.DataFrame(data, index=index)
else:
dbf.close()
return pd.DataFrame(data)
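# A hedged usage sketch (not part of the original module, and not called
# anywhere in it): round-trip a small DataFrame through a dBase file using the
# two functions above. The function name, file name and column names are
# hypothetical.
def _example_dbf_roundtrip(dbf_path='example.dbf'):
    df = pd.DataFrame({'Name': ['B1001', 'B1002'], 'height_ag': [3.5, 6.0]})
    dataframe_to_dbf(df, dbf_path)     # writes example.dbf with default specs
    return dbf_to_dataframe(dbf_path)  # reads it back as a DataFrame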
def xls_to_dbf(input_file, output_path, output_file_name):
df = pd.read_excel(input_file)
output_file = os.path.join(output_path, output_file_name + ".dbf")
dataframe_to_dbf(df, output_file)
def dbf_to_xls(input_file, output_path, output_file_name):
df = dbf_to_dataframe(input_file)
df.to_excel(os.path.join(output_path, output_file_name + ".xlsx"), index=False)
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
input_file = config.dbf_tools.input_file
output_file_name = config.dbf_tools.output_file_name
output_path = config.dbf_tools.output_path
if input_file.endswith('.dbf'):
dbf_to_xls(input_file=input_file, output_path=output_path, output_file_name=output_file_name)
elif input_file.endswith('.xls') or input_file.endswith('.xlsx'):
xls_to_dbf(input_file=input_file, output_path=output_path, output_file_name=output_file_name)
else:
print('input file type not supported')
if __name__ == '__main__':
main(cea.config.Configuration())
| mit |
EFerriss/HydrogenCpx | HydrogenCpx/Fig5_Fig6_CpxProfiles.py | 1 | 28995 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 17:55:02 2015
@author: Ferriss
Before and after heating comparison of FTIR spectra on the rims of cpx samples
"""
import cpx_spectra
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator
import pynams.diffusion as diffusion
import string
import json
from lmfit import minimize
#%% DIFFUSIVITIES
### Saved preferred initial values (i), diffusivities (D), and errors (e)
### for Kunlun diopside K3, K4 at 91 hours of heating (K91) and 154 hours
### of heating (K154), K5, and Jaipur diopside J1 (J)
### In peak wavenumber order: 3645, 3617, 3540, 3443, 3355, BULK H #####
### The same numbers are used in Fig. 7
### The data and marker styles are set in cpx_spectra.py
i_K3 = cpx_spectra.i_K3
D_K3 = cpx_spectra.D_K3
e_K3 = cpx_spectra.e_K3
i_K91 = cpx_spectra.i_K91
D_K91 = cpx_spectra.D_K91
e_K91 = cpx_spectra.e_K91
i_K154 = cpx_spectra.i_K154
D_K154 = cpx_spectra.D_K154
e_K154 = cpx_spectra.e_K154
i_K5 = cpx_spectra.i_K5
D_K5 = cpx_spectra.D_K5
e_K5 = cpx_spectra.e_K5
i_J = cpx_spectra.i_J
D_J = cpx_spectra.D_J
e_J = cpx_spectra.e_J
#%% Data setup
#iwater_Kunlun = cpx_spectra.K_water_peaks
#iwater_Jaipur = cpx_spectra.J_water_peaks
#iwater_Kunlun_bulk = sum(iwater_Kunlun)
#iwater_Jaipur_bulk = sum(iwater_Jaipur)
# all whole block data associated with Kunlun diopside sample K3
wbsK3 = [cpx_spectra.K3wb_trueInit,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K3wb_6days,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
]
# all whole block data relevant to Kunlun diopside sample K4
# only up to heating for 91 hours
wbsK4_91 = [
cpx_spectra.K4wb_init,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
cpx_spectra.K4wb_91hr
]
# all whole block data relevant to Kunlun diopside sample K4
# including heating for 154 hours
wbsK4_154 = [
cpx_spectra.K4wb_init,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
cpx_spectra.K4wb_91hr,
cpx_spectra.K4wb_154hr,
]
# all whole block data relevant to Kunlun diopside sample K5
wbsK5 = [
cpx_spectra.K5wb_init,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
cpx_spectra.K5wb
]
# all whole block data relevant to Jaipur diopside J1
wbsJ = [
cpx_spectra.J1wb_initial,
cpx_spectra.J1wb
]
# list of initial whole block data - 1 for each diopside
init_list = [
cpx_spectra.K3wb_trueInit,
cpx_spectra.K4wb_init,
cpx_spectra.K5wb_init,
cpx_spectra.J1wb_initial]
wb_list = wbsK3 + wbsK4_154 + wbsK5 + wbsJ + wbsK4_91
for wb in wb_list + init_list:
wb.get_baselines()
wb.get_peakfit()
wb.setupWB()
for prof in wb.profiles:
prof.make_wholeblock(True, True)
# Default to all Kunlun scaled water
# prof.wb_waters = np.array(prof.wb_areas) * iwater_Kunlun_bulk
#%% Plotting details and functions
style_bulk = {'marker' : 'o', 'fillstyle' : 'none', 'linestyle' : 'none',
'color' : 'k', 'markersize' : 7}
style_peak0 = {'marker' : 'x', 'fillstyle' : 'none', 'linestyle' : 'none',
'color' : 'r', 'markersize' : 6, 'label' : '3645 cm$^{-1}$'}
style_peak1 = {'marker' : '+', 'fillstyle' : 'none', 'linestyle' : 'none',
'color' : 'orange', 'markersize' : 6, 'label' : '3617 cm$^{-1}$'}
style_peak2 = {'marker' : 's', 'fillstyle' : 'full', 'linestyle' : 'none',
'color' : 'k', 'markersize' : 6, 'markerfacecolor' : 'teal',
'alpha' : 0.5, 'label' : '3540 cm$^{-1}$'}
style_peak3 = {'marker' : '_', 'fillstyle' : 'none', 'linestyle' : 'none',
'color' : 'g', 'markersize' : 6, 'mew' : 2,
'label' : '3460 cm$^{-1}$'}
style_peak4 = {'marker' : '|', 'fillstyle' : 'none', 'linestyle' : 'none',
'color' : 'b', 'markersize' : 6, 'mew' : 2,
'label' : '3443 cm$^{-1}$'}
style_peak5 = {'marker' : 'd', 'fillstyle' : 'full', 'linestyle' : 'none',
'color' : 'k', 'markersize' : 6, 'markerfacecolor' : 'violet',
'alpha' : 0.5, 'label' : '3355 cm$^{-1}$'}
style_init_K3 = {'marker' : 'o', 'markeredgecolor' : 'r', 'linestyle' : 'none',
'markersize' : 6, 'markerfacecolor' : 'w',
'label' : 'initial K3', 'mew' : 1, 'alpha' : 1.}
style_init_K4 = {'marker' : 's', 'markeredgecolor' : 'b', 'linestyle' : 'none',
'markersize' : 6, 'markerfacecolor' : 'w',
'label' : 'initial K4', 'mew' : 1, 'alpha' : 0.5}
style_init_K5 = {'marker' : '^', 'markeredgecolor' : 'g', 'linestyle' : 'none',
'markersize' : 6, 'markerfacecolor' : 'w',
'label' : 'initial K5', 'mew' : 1, 'alpha' : 0.5}
style_init_J = {'marker' : 'D', 'markeredgecolor' : 'k', 'linestyle' : 'none',
'markersize' : 6, 'markerfacecolor' : 'w',
'label' : 'initial J1', 'mew' : 1, 'alpha' : 0.5}
style_K4q = {'marker' : 'x', 'color' : 'b', 'linestyle' : 'none',
'markersize' : 6, 'label' : '480 $\degree$C, 0.6hr',
'mew' : 1, 'alpha' : 0.5}
style_K3q = {'marker' : '+', 'color' : 'r', 'linestyle' : 'none',
'markersize' : 6, 'label' : '696 $\degree$C, 2hr', 'mew' : 1,
'alpha' : 1.}
style_K3_15h = {'marker' : '3', 'color' : 'r', 'linestyle' : 'none',
'markersize' : 6, 'label' : '796 $\degree$C, 15hr', 'mew' : 1,
'alpha' : 0.5}
style_K3_6d = {'marker' : 'o', 'color' : 'r', 'linestyle' : 'none',
'markersize' : 6, 'label' : '812 $\degree$C, 140hr',
'alpha' : 0.5, 'mew' : 1}
style_K4_1h = {'marker' : '|', 'color' : 'c', 'linestyle' : 'none',
'markersize' : 6, 'label' : '904 $\degree$C, 0.7hr',
'mew' : 1, 'alpha' : 0.75}
style_K4_91 = {'marker' : 'd', 'color' : 'c', 'linestyle' : 'none',
'markersize' : 6, 'label' : '904 $\degree$C, 91hr',
'alpha' : 0.5}
style_K4_154 = {'marker' : 's', 'color' : 'b', 'linestyle' : 'none',
'markersize' : 6, 'label' : '904 $\degree$C, 154hr',
'alpha' : 0.5}
style_K5 = {'marker' : '^', 'color' : 'g', 'linestyle' : 'none',
'markersize' : 6, 'label' : '1000 $\degree$C, 75hr',
'alpha' : 0.5}
style_J = {'marker' : 'D', 'color' : 'k', 'linestyle' : 'none',
'markersize' : 6, 'label' : 'Jaipur\n904 $\degree$C, 0.6 hr',
'alpha' : 0.5}
style_iline = {'linestyle' : '--', 'color' : 'k',
'label' : '"initial" line\nfor D model'}
styledict={cpx_spectra.K3wb_trueInit : style_init_K3,
cpx_spectra.K3wb_init : style_K3q,
cpx_spectra.K3wb_800C_15hr : style_K3_15h,
cpx_spectra.K3wb_6days : style_K3_6d,
cpx_spectra.K4wb_init : style_init_K4,
cpx_spectra.K4wb_quench : style_K4q,
cpx_spectra.K4wb_1hr : style_K4_1h,
cpx_spectra.K4wb_91hr : style_K4_91,
cpx_spectra.K4wb_154hr : style_K4_154,
cpx_spectra.K5wb_init : style_init_K5,
cpx_spectra.K5wb : style_K5,
cpx_spectra.J1wb_initial : style_init_J,
cpx_spectra.J1wb : style_J}
# For least squares fitting to obtain diffusivities
func2min = diffusion.diffusion3Dwb_params
def get_best_initialD(fname, peak_idx):
"""Takes name = 'K4_91', 'K3', 'K5', 'K4_154', or 'J' and peak index
Returns best-fit initial and isotropic diffusivity generated by
Sentivity.py
"""
fname_list = ['K4_91', 'K3', 'K5', 'K4_154', 'J']
if fname not in fname_list:
print 'fname must be one of the following:'
print fname_list
return False, False
fi = '../../../CpxPaper/figures/sensitivity_'
peak_label_list = ['3645', '3617', '3540', '3460', '3443', '3355']
filename = ''.join((fi, fname, '_', peak_label_list[peak_idx], '.txt'))
f = open(filename, 'r')
x = json.load(f)
besti = x[0][0]
bestD = x[1][0]
return besti, bestD
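# Illustrative call (not executed here): the saved best-fit initial value and
# isotropic diffusivity for, e.g., the 3540 cm-1 peak of sample K4 after 91 hr
# of heating would be read back with something like
#     besti, bestD = get_best_initialD('K4_91', peak_idx=2)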
def figOutline(top=30., ylab='peak area', ncol=3, nrow=4, figsize=(6.5, 8),
yTextPos=0.5, tit=None):
"""Make and return figure and axes[12]"""
fig = plt.figure()
fig.set_size_inches(figsize)
gs = gridspec.GridSpec(nrow, ncol)
axes = []
for row in range(nrow):
for col in range(ncol):
axes.append(plt.subplot(gs[row, col]))
axes[0].set_title('Profile || a*')
axes[1].set_title('Profile || b')
axes[2].set_title('Profile || c')
if tit is None:
if ylab == 'peak area':
fig.text(0.05, yTextPos, 'Peak area (cm$^{-2}$)',
ha='center', va='center', rotation='vertical', fontsize=14)
# fig.text(0.06, yTextPos, 'Bell et al. 1995 calibration: 7.09 +/- 0.32 cm$^{-2}$/ppm H$_2$O',
# ha='center', va='center', rotation='vertical', fontsize=12)
elif ylab == 'K5':
fig.text(0.04, yTextPos, 'Peak area in Kunlun diopside after 75 hr at 1000 $\degree$C (cm$^{-2}$)',
ha='center', va='center', rotation='vertical', fontsize=14)
else:
fig.text(0.04, yTextPos, 'Estimated water (ppm H$_2$O) using whole-block data\nscaled up using initial water estimates from polarized data',
ha='center', va='center', rotation='vertical', fontsize=14)
else:
fig.text(0.04, yTextPos, tit, ha='center', va='center',
rotation='vertical', fontsize=14)
ntwins = len(np.arange(2, len(axes), 3))
for idx in np.arange(2, len(axes), 3):
axes.append(axes[idx].twinx())
# axes[idx].set_ylim(0, 100)
for ax in axes:
ax.set_xlim(0., 1.)
ax.set_ylim(0, top)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
for idx in np.arange(0, 3*nrow, 3):
plt.setp(axes[idx].get_yticklabels(), visible=True)
nax = len(axes)
for idx in [nax-ntwins-3, nax-ntwins-2, nax-ntwins-1]:
plt.setp(axes[idx].get_xticklabels(), visible=True, rotation=45)
axes[nax-ntwins-2].set_xlabel('normalized distances (X / Length)', fontsize=14)
fig.subplots_adjust(bottom=0.1, top=0.95, right=0.88)
return fig, axes
def plotpeaks(wb_list, wbs_list, ilist, dlist, elist, dlist_slow=None,
slowb=[False]*6, legidx=13, sidelabels=True, ncol=5,
wholeblock=False, peak_idx_list = [0, 1, 2, 4, 5],
show_legend=True, dlabel='Isotropic\nDiffusion curve',
xtickgrid=[0.2]*6, ytickgrid=[5.]*6):
"""Takes wb = list of main whole block being plotted,
wbs = list of lists of whole block data being plotted in each panel,
ilist = list of initials, dlist = list of diffusivities,
elist = list of errors"""
#### legend ####
if show_legend is True:
styles = []
for wbs in wbs_list:
for wbtoplot in wbs:
styleToAdd = styledict[wbtoplot]
if styleToAdd not in styles:
styles.append(styleToAdd)
for sty in styles:
axes[legidx].plot(-100, -100, **sty)
axes[legidx].plot(-100, -100, '-k', label='"initial"', linewidth=1)
axes[legidx].plot(-100, -100, '-g', label=dlabel,
linewidth=3)
axes[legidx].plot(-100, -100, '--g', label='+/- error log10 D')
axes[legidx].legend(fancybox='on', loc=8) # 9 for top
axesranges = [range(3), range(3,6), range(6,9), range(9,12), range(12,15)]
### y axis limits and "initial"
for r in range(ncol):
for ax_idx in axesranges[r]:
ax = axes[ax_idx]
ax.plot(ax.get_xlim(), [ilist[r], ilist[r]], '-k', linewidth=1)
ax.set_ylim(0, tops[r])
xd = [] # x data to plot
yd = [] # y data to plot
yf = []
ys = []
ax_idx = 0
for idx in range(ncol): # loop through each row of data
# get basic data
peak_idx = peak_idx_list[idx]
initial = ilist[idx]
wb = wb_list[idx]
wbs = wbs_list[idx]
# diffusion curves
L3 = []
D3 = []
Dfast = []
Dslow = []
for k in range(3):
L3.append(wb.profiles[k].len_microns)
D3.append(dlist[idx])
if slowb[idx] is True:
D3[1] = dlist_slow[idx]
for k in range(3):
Dfast.append(D3[k] + elist[idx])
Dslow.append(D3[k] - elist[idx])
params = diffusion.params_setup3D(L3, D3, wb.time_seconds, initial)
xdiff, ydiff = diffusion.diffusion3Dwb_params(params,
raypaths=wb.raypaths, show_plot=False)
params = diffusion.params_setup3D(L3, Dfast, wb.time_seconds, initial)
xdiff, yfast = diffusion.diffusion3Dwb_params(params,
raypaths=wb.raypaths,
show_plot=False)
params = diffusion.params_setup3D(L3, Dslow, wb.time_seconds, initial)
xdiff, yslow = diffusion.diffusion3Dwb_params(params,
need_to_center_x_data=False,
raypaths=wb.raypaths,
show_plot=False)
for k in range(3):
m = max(xdiff[k])
xdiff[k] = xdiff[k] / m
xd.append(xdiff)
yd.append(ydiff)
yf.append(yfast)
ys.append(yslow)
### side labels ####
ylabelloc = 'left'
formatter = '{:.2f}'
if slowb[idx] is True:
subscript = '$_c$'
else:
subscript = ''
if sidelabels is True:
axes[15].set_ylabel(''.join(('3645\ncm$^{-1}$\n\nlog$_{10}$D', subscript,
'\n', formatter.format(peak_D[0]), '\n+/-', str(er[0]))),
rotation=0, ha=ylabelloc, va='center')
axes[16].set_ylabel(''.join(('3617\ncm$^{-1}$\n\nlog$_{10}$D', subscript, '\n',
formatter.format(peak_D[1]), '\n+/-', str(er[1]))),
rotation=0, ha=ylabelloc, va='center')
axes[17].set_ylabel(''.join(('3540\ncm$^{-1}$\n\nlog$_{10}$D', subscript, '\n',
formatter.format(peak_D[2]), '\n+/-', str(er[2]))),
rotation=0, ha=ylabelloc, va='center')
axes[18].set_ylabel(''.join(('3443\ncm$^{-1}$\n\nlog$_{10}$D', subscript, '\n',
formatter.format(peak_D[4]), '\n+/-', str(er[4]))),
rotation=0, ha=ylabelloc, va='center')
axes[19].set_ylabel(''.join(('3355\ncm$^{-1}$\n\nlog$_{10}$D', subscript, '\n',
formatter.format(peak_D[5]), '\n+/-', str(er[5]))),
rotation=0, ha=ylabelloc, va='center')
for k in range(3):
ax = axes[ax_idx]
# Set tick locations
xmajorLocator = MultipleLocator(xtickgrid[idx])
ymajorLocator = MultipleLocator(ytickgrid[idx])
ax.xaxis.set_major_locator(xmajorLocator)
ax.yaxis.set_major_locator(ymajorLocator)
if show_legend is True:
if ax_idx < legidx:
s = ''.join((string.ascii_uppercase[ax_idx],'.'))
elif ax_idx > legidx:
s = ''.join((string.ascii_uppercase[ax_idx-1],'.'))
else:
s = ''.join((string.ascii_uppercase[ax_idx],'.'))
ax.text(0.1, ax.get_ylim()[1] - 0.15*ax.get_ylim()[1], s,
backgroundcolor='w')
# Plot the diffusion curves
ax.plot(xd[idx][k], yd[idx][k], '-g', linewidth=3)
ax.plot(xd[idx][k], yf[idx][k], '--g')
ax.plot(xd[idx][k], ys[idx][k], '--g')
# Plot all wb data in wbs list of wholeblocks
for wb_idx in range(len(wbs)):
if wbs[wb_idx].raypaths[k] == wb.raypaths[k]:
xi3, yi3 = wbs[wb_idx].xy_picker(peak_idx=peak_idx,
wholeblock=wholeblock,
centered=False)
xi = xi3[k]
yi = yi3[k]
# Normalize x
L = wbs[wb_idx].profiles[k].len_microns
xi = np.array(xi) / L
ax.plot(xi, yi, **styledict[wbs[wb_idx]])
# label ray paths
if wb.profiles[k].raypath == 'a':
R = ''.join(('R || ', wb.profiles[k].raypath,'*'))
else:
R = ' '.join(('R ||', wb.profiles[k].raypath))
axes[ax_idx].text(0.65, ax.get_ylim()[1] - 0.15*ax.get_ylim()[1], R)
ax_idx = ax_idx + 1
def get_besti(wb, peak_idx, Dguess=-12):
"""Returns best-fit initial and isotropic D """
x, y = wb.xy_picker(peak_idx=peak_idx, wholeblock=False, heights_instead=False)
L3 = []
D3 = []
for k in range(3):
L3.append(wb.profiles[k].len_microns)
D3.append(Dguess)
# best fit initial and corresponding D
params = diffusion.params_setup3D(L3, D3, wb.time_seconds, initial=17., isotropic=True, vinit=True)
minimize(func2min, params, args=(x, y), kws={'raypaths' : wb.raypaths, 'show_plot' : False})
besti = params['initial_unit_value'].value
bestD = params['log10Dx'].value
return besti, bestD
def get_bestd(wb, peak_idx, initial, Dguess=-12):
"""Returns best-fit initial and isotropic D """
x, y = wb.xy_picker(peak_idx=peak_idx, wholeblock=False, heights_instead=False)
L3 = []
D3 = []
for k in range(3):
L3.append(wb.profiles[k].len_microns)
D3.append(Dguess)
# best fit initial and corresponding D
params = diffusion.params_setup3D(L3, D3, wb.time_seconds, initial=initial,
isotropic=True, vinit=False)
minimize(func2min, params, args=(x, y), kws={'raypaths' : wb.raypaths, 'show_plot' : False})
bestD = params['log10Dx'].value
return bestD
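# Illustrative only: a hypothetical call pattern for the two fitting helpers
# above, using a whole-block object defined in cpx_spectra and a peak index
# into peak_label_list.
# besti, bestD = get_besti(cpx_spectra.K5wb, peak_idx=2, Dguess=-12)
# bestD_alone = get_bestd(cpx_spectra.K5wb, peak_idx=2, initial=besti)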
#%% Figure 5 - peak specific profiles
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5))
ylabelloc = 'left'
axes[15].set_ylabel('Kunlun\ndiopside\n"initial"\n696 $\degree$C\n2 hr', rotation=0, ha=ylabelloc, va='center')
axes[16].set_ylabel('Kunlun\ndiopside\n812 $\degree$C\n6 days', rotation=0, ha=ylabelloc, va='center')
axes[17].set_ylabel('Kunlun\ndiopside\n904 $\degree$C\n154 hr', rotation=0, ha=ylabelloc, va='center')
axes[18].set_ylabel('Kunlun\ndiopside\n1000 $\degree$C\n75 hr', rotation=0, ha=ylabelloc, va='center')
axes[19].set_ylabel('Jaipur\ndiopside\n904 $\degree$C\n0.6 hr', rotation=0, ha=ylabelloc, va='center')
axes_list = range(0, 15)
ax_idx = 0
wbs5 = [cpx_spectra.K3wb_init,
cpx_spectra.K3wb_6days,
cpx_spectra.K4wb_154hr,
cpx_spectra.K5wb,
cpx_spectra.J1wb,]
for wb in wbs5:
print(wb.name)
for k in range(3):
L = wb.profiles[k].len_microns
x = np.array(wb.profiles[k].positions_microns) / L
if wb.profiles[k].raypath == 'a':
R = ''.join(('R || ', wb.profiles[k].raypath,'*'))
else:
R = ' '.join(('R ||', wb.profiles[k].raypath))
if ax_idx > 0:
s = ''.join((string.ascii_uppercase[ax_idx-1],'.'))
axes[axes_list[ax_idx]].text(0.1, 25, s)
axes[axes_list[ax_idx]].text(0.65, 25, R)
idx = 0
for spectrum in wb.profiles[k].spectra_list:
print(''.join((spectrum.fname, ' (x=', '{:.2f}'.format(x[idx]), ')')))
idx = idx + 1
print(' ')
y0 = wb.profiles[k].peak_areas[0, :]
axes[axes_list[ax_idx]].plot(x, y0, **style_peak0 )
y1 = wb.profiles[k].peak_areas[1, :]
axes[axes_list[ax_idx]].plot(x, y1, **style_peak1 )
y2 = wb.profiles[k].peak_areas[2, :]
axes[axes_list[ax_idx]].plot(x, y2, **style_peak2 )
y3 = wb.profiles[k].peak_areas[3, :]
axes[axes_list[ax_idx]].plot(x, y3, **style_peak3 )
y4 = wb.profiles[k].peak_areas[4, :]
axes[axes_list[ax_idx]].plot(x, y4, **style_peak4 )
y5 = wb.profiles[k].peak_areas[5, :]
axes[axes_list[ax_idx]].plot(x, y5, **style_peak5 )
ax_idx = ax_idx + 1
leg = axes[0].legend(loc=2, ncol=1, fancybox='on', fontsize=8)
plt.savefig('Fig5_CpxProfiles.eps', format='eps', dpi=1000)
fig.savefig('Fig5_CpxProfiles.tif', format='tif', dpi=300)
#%% K3 Diffusivity modeling; Supplemental Figure 1
wb = cpx_spectra.K3wb_6days
fname = 'K3'
iareas = i_K3
peak_D = D_K3
er = e_K3
tops = [17, 35, 30, 40, 15]
style_K3_6d['alpha'] = 1.
style_K3_6d['mew'] = 1.5
wbs = [
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K3wb_6days,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
]
tit = 'Peak area in Kunlun diopside after 6 days at 816 $\degree$C (cm$^{-2}$)'
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5), tit=tit)
plotpeaks(wb_list=[wb]*5, wbs_list=[wbs]*5, ilist=iareas, dlist=peak_D,
elist=er, sidelabels=False)
peak_idx_list = ['3645', '3617', '3540', '3443', '3355']
facecolors = ['wheat', 'thistle']
alphas = [0.4, 0.4]
xtext = 1.2
for k in range(5):
ytext_top = axes[k+15].get_ylim()[1]
ytext_top = ytext_top - 0.03*ytext_top
ra = axes[k+15].get_ylim()[1] - axes[k+15].get_ylim()[0]
ytext_gap = ra*0.475
ytext_bot = ytext_top - ytext_gap
sidelabel_top = ''.join((peak_idx_list[k], '\ncm$^{-1}$'))
sidelabel_bot = ''.join(('log$_{10}$D\n',
'{:.2f}'.format(peak_D[k]),
'\n+/-', '{:.1f}'.format(er[0])
))
axes[k+15].text(xtext, ytext_top,
sidelabel_top, rotation=0, va='top', ha='center',
bbox=dict(boxstyle='round', facecolor=facecolors[0],
alpha=alphas[0]))
axes[k+15].text(xtext, ytext_bot,
sidelabel_bot, rotation=0, va='top', ha='center',
bbox=dict(boxstyle='square', facecolor=facecolors[1],
alpha=alphas[1]))
fig.show()
print('Finished')
plt.savefig('Supplement_Fig1_K3.eps', format='eps', dpi=1000)
plt.savefig('Supplement_Fig1_K3.tif', format='tif', dpi=300)
#%% K4 91 hr Diffusivity modeling; Supplemental Fig. 2
style_K4_91['alpha'] = 1.
style_K4_91['mew'] = 1.5
iareas = i_K91
peak_D = D_K91
er = e_K91
tops = [20, 35, 30, 35., 15]
wb = cpx_spectra.K4wb_91hr
wbs = [
cpx_spectra.K4wb_init,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
cpx_spectra.K4wb_91hr
]
tit = 'Peak area in Kunlun diopside after 91 hr at 904 $\degree$C (cm$^{-2}$)'
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5), tit=tit)
plotpeaks(wb_list=[wb]*5, wbs_list=[wbs]*5, ilist=iareas, dlist=peak_D,
elist=er)
plt.savefig('Supplement_Fig2_K4_91hr.eps', format='eps', dpi=1000)
plt.savefig('Supplement_Fig2_K4_91hr.tif', format='tif', dpi=300)
#%% K4 154 hr Diffusivity modeling; Supplemental Fig. 3
tit = 'Peak area in Kunlun diopside after 154 hr at 904 $\degree$C (cm$^{-2}$)'
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5), tit=tit)
style_K4_154['alpha'] = 1.
style_K4_154['mew'] = 1.5
wbs = [
cpx_spectra.K4wb_init,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
cpx_spectra.K4wb_91hr,
cpx_spectra.K4wb_154hr,
]
iareas = i_K154
peak_D = D_K154
er = e_K154
tops = [21, 36, 31, 35, 16]
wb = cpx_spectra.K4wb_154hr
plotpeaks(wb_list=[wb]*5, wbs_list=[wbs]*5, ilist=iareas, dlist=peak_D,
elist=er)
plt.savefig('Supplement_Fig3_K4_154hr.eps', format='eps', dpi=1000)
plt.savefig('Supplement_Fig3_K4_154hr.tif', format='tif', dpi=300)
#%% K5 Diffusivity modeling; Supplemental Fig. 4
tit = 'Peak area in Kunlun diopside after 75 hr at 1000 $\degree$C (cm$^{-2}$)'
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5), tit=tit)
wb = cpx_spectra.K5wb
style_K5['alpha'] = 1.
style_K5['mew'] = 1.5
wbs = [
cpx_spectra.K5wb_init,
cpx_spectra.K3wb_init,
cpx_spectra.K3wb_800C_15hr,
cpx_spectra.K4wb_quench,
cpx_spectra.K4wb_1hr,
cpx_spectra.K5wb
]
iareas = i_K5
peak_D = D_K5
er = e_K5
tops = [23, 35, 30, 35, 15]
plotpeaks(wb_list=[wb]*5, wbs_list=[wbs]*5, ilist=iareas, dlist=peak_D,
elist=er)
plt.savefig('Supplement_Fig4_K5.eps', format='eps', dpi=1000)
plt.savefig('Supplement_Fig4_K5.tif', format='tif', dpi=300)
#%% J diffusivity modeling; Supplemental Figure 5
wb = cpx_spectra.J1wb
style_J['alpha'] = 0.5
style_J['mew'] = 1.5
wbs = [
cpx_spectra.J1wb_initial,
cpx_spectra.J1wb
]
iareas = i_J
peak_D = D_J
er = e_J
tops = [4, 8, 23, 20, 50]
peak_D_slow = np.array(peak_D) - 1.
ytickgrid = [1, 2, 5, 5, 10]
tit = 'Peak area in Jaipur diopside after 0.6 hr at 904 $\degree$C (cm$^{-2}$)'
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5), tit=tit)
plotpeaks(wb_list=[wb]*5, wbs_list=[wbs]*5, ilist=iareas, dlist=peak_D,
elist=er, slowb=[True]*5, dlist_slow=peak_D_slow, legidx=4,
dlabel='Diffusion curve', ytickgrid=ytickgrid)
plt.savefig('Supplement_Fig5_J1.eps', format='eps', dpi=1000)
plt.savefig('Supplement_Fig5_J1.tif', format='tif', dpi=300)
#%% Bulk WB areas with diffusivity estimates; Fig. 6
wb_list = [cpx_spectra.K3wb_6days,
cpx_spectra.K4wb_91hr,
cpx_spectra.K4wb_154hr,
cpx_spectra.J1wb,
cpx_spectra.K5wb]
typelabels = ['Kunlun\ndiopside\n812 $\degree$C\n6 days',
'Kunlun\ndiopside\n904 $\degree$C\n91 hr',
'Kunlun\ndiopside\n904 $\degree$C\n154 hr',
'Jaipur\ndiopside\n904 $\degree$C\n0.6 hr',
'Kunlun\ndiopside\n1000 $\degree$C\n75 hr']
my_ilist = [i_K3, i_K91, i_K154,
i_J, i_K5]
my_dlist = [D_K3, D_K91, D_K154,
D_J, D_K5]
my_elist = [e_K3, e_K91, e_K154,
e_J, e_K5]
tops = [ 1.6, 1.6, 1.6, 1.8, 1.6,]
slowb_list = [False, False, False, True, False, False]
wbs_list = [[wb_list[0]], [wb_list[1]], [wb_list[2]], [wb_list[3]], [wb_list[4]]]
### Moving Jaipur to the end for all of them
for li in [wb_list, typelabels, my_ilist, my_dlist, my_elist,
tops, slowb_list, wbs_list]:
li.insert(4, li.pop(3))
iareas = np.ones(5)
Dxz = np.ones(5)
er = np.ones(5)
for idx in range(5):
iareas[idx] = my_ilist[idx][-1]
Dxz[idx] = my_dlist[idx][-1]
er[idx] = my_elist[idx][-1]
peak_D_slow = np.array(Dxz) - 1.
tit = 'Bulk H (Total area / Initial total area)'
fig, axes = figOutline(nrow=5, ncol=3, figsize=(6.5, 7.5),
top=1.6, tit=tit)
plotpeaks(wb_list=wb_list, wbs_list=wbs_list, ilist=iareas, dlist=Dxz,
elist=er, slowb=slowb_list, dlist_slow=peak_D_slow,
show_legend=False, sidelabels=False,
wholeblock=True, peak_idx_list=[None]*5, ytickgrid=[0.5]*5)
peak_idx_list = ['3645', '3617', '3540', '3443', '3355']
facecolors = ['wheat', 'thistle']
alphas = [0.4, 0.4]
xtext = 1.25
textsize = 9
for k in range(5):
ytext_top = axes[k+15].get_ylim()[1] / 2.
ytext_bot = axes[k+15].get_ylim()[0]
sidelabel_top = typelabels[k]
axes[k+15].text(xtext, ytext_top,
sidelabel_top, rotation=0, va='center', ha='center', fontsize=textsize,
bbox=dict(boxstyle='round', facecolor=facecolors[0],
alpha=alphas[0]))
idx = 0
for k in [2, 5, 8, 11, 14]:
sidelabel_bot = ''.join(('log$_{10}$D$_c$\n',
'{:.2f}'.format(my_dlist[idx][-1]),
'+/-', '{:.2f}'.format(my_elist[idx][-1])
))
if k == 14:
yloc = 0.
else:
yloc = 0.15
axes[k].text(0.5, yloc, sidelabel_bot, va='bottom', ha='center',
fontsize=textsize)
idx = idx + 1
plt.savefig('Fig6_DiffusivityFitting.eps', format='eps', dpi=1000)
plt.savefig('Fig6_DiffusivityFitting.tif', format='tif', dpi=300) | mit |
adwasser/spectral-knobs | knobs/cloud.py | 1 | 11952 | from itertools import cycle
from string import ascii_lowercase
import numpy as np
from matplotlib import pyplot as plt
from ipywidgets import FloatSlider, Checkbox, fixed, interactive
from .physics import hydrogen_lines, line_profile, c
class Cloud:
"""Hydrogen cloud
Parameters
----------
z : redshift
sigma : velocity dispersion in km/s
n : column density (arbitrary units for now)
P : period in years
vsini : velocity amplitude in km/s
t : time in years
lyman, balmer, etc... : boolean flags for including the specified series
absorption : bool, if true, subtract flux from continuum instead of
adding emission lines
continuum : func: wv -> flux, only used if absorption lines
or float for a flat continuum
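Examples
--------
A minimal usage sketch (wavelengths in nm, fluxes in arbitrary units; the
exact spectrum depends on the line data supplied by ``.physics``):

>>> import numpy as np
>>> cloud = Cloud(z=0.01, sigma=50.0, n=0.05)
>>> wv = np.linspace(380.0, 700.0, 2000)
>>> flux = cloud.line_flux(wv)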
"""
def __init__(self, z, sigma, n, P=5, vsini=0, t=0,
lyman=True, balmer=True, paschen=False,
absorption=False, continuum=1.0, n_upper=8):
self.z = z
self.sigma = sigma
self.n = n
self.P = P
self.vsini = vsini
self.t = t
self.lyman = lyman
self.balmer = balmer
self.paschen = paschen
self.absorption = absorption
if callable(continuum):
self.continuum = continuum
else:
self.continuum = lambda wv: continuum * np.ones(wv.shape)
self.n_upper = n_upper
@property
def series(self):
"""Construct a list of integers representing the series"""
series = []
if self.lyman:
series.append(1)
if self.balmer:
series.append(2)
if self.paschen:
series.append(3)
return series
@series.setter
def series(self, integers):
integers = list(map(int, integers))
for i in integers:
if i not in range(1, 4):
raise ValueError("Input needs to be from {1, 2, 3}")
self.lyman = True if 1 in integers else False
self.balmer = True if 2 in integers else False
self.paschen = True if 3 in integers else False
def line_flux(self, wv, weights=None):
"""Get line fluxes for the specified wavelength array."""
lines = hydrogen_lines(self.series, self.n_upper)
if weights is None:
weights = np.ones(lines.shape)
flux = np.zeros(wv.shape)
for i, line in enumerate(lines):
z = self.z + self.vsini * np.sin(2 * np.pi / self.P * self.t) / c
flux += line_profile(wv, line, z, self.sigma, self.n)
if self.absorption:
return self.continuum(wv) - flux
return flux
class CloudInteractive(interactive):
"""
Interactive wrapper to a hydrogen cloud.
Parameters
----------
wvmin : minimum wavelength in nm
wvmax : maximum wavelength in nm
cloud : Cloud object (if None, then construct from default)
cloud_kwargs : keyword arguments to pass to Cloud constructor
show_labels : bool, if True include labels in the widgets
widgets : iterable of strings, indicating which widgets to construct
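Examples
--------
Meant to be displayed in a Jupyter notebook, for example:

>>> ci = CloudInteractive(380.0, 700.0, widgets=('z', 'sigma', 'n'))  # doctest: +SKIP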
"""
def __init__(self, wvmin, wvmax, cloud=None, cloud_kwargs={}, show_labels=True,
widgets=('z', 'sigma', 'n', 'lyman', 'balmer', 'paschen'),
zmin=0.00, zmax=0.10,
smin=1, smax=500,
nmin=0, nmax=0.1,
Pmin=0, Pmax=10,
vmin=0, vmax=0,
tmin=0, tmax=20):
if cloud is None:
self.cloud = Cloud(z=0.00,
sigma=(smin + smax) / 2.0,
n=(nmin + nmax) / 2.0,
P=(Pmin + Pmax) / 2.0,
vsini=(vmin + vmax) / 2.0,
t=(tmin + tmax) / 2.0,
**cloud_kwargs)
cloud = self.cloud
else:
self.cloud = cloud
dv = (smax + smin) / 8.0
dlam = dv / c * (wvmax - wvmin) / 2.0
wv = np.linspace(wvmin, wvmax, int((wvmax - wvmin) / dlam))
self.wv = wv
# set max height of graph
old_sigma = cloud.sigma
old_n = cloud.n
cloud.sigma = (smin + smax) / 2.0
cloud.n = nmax
self.ymax = 1.1 * np.amax(cloud.line_flux(wv))
cloud.sigma = old_sigma
cloud.n = old_n
# construct widget dictionary
widget_dict = {}
# float sliders
keys = ['z', 'sigma', 'n', 'P', 'v', 't']
labels = ['Redshift: ', 'Dispersion: ', 'Density: ',
'Period: ', 'Amplitude: ', 'Time: ']
widget_kwargs = {"disabled": False,
"continuous_update": False,
"orientation": "horizontal",
"readout": True,
"readout_format": ".4f"}
values = [cloud.z, cloud.sigma, cloud.n,
cloud.P, cloud.vsini, cloud.t]
bounds = [(zmin, zmax), (smin, smax), (nmin, nmax),
(Pmin, Pmax), (vmin, vmax), (tmin, tmax)]
letter = cycle(ascii_lowercase)
for i, key in enumerate(keys):
value = values[i]
if key not in widgets:
widget_dict[key] = fixed(value)
continue
if show_labels:
label = labels[i]
else:
label = "({})".format(next(letter))
lower, upper = bounds[i]
widget_dict[key] = FloatSlider(value=value,
min=lower,
max=upper,
step=(upper - lower) / 100,
description=label,
**widget_kwargs)
# boolean checkboxes
keys = ['lyman', 'balmer', 'paschen']
labels = [s.capitalize() + ": " for s in keys]
widget_kwargs = {"disabled": False}
values = [cloud.lyman, cloud.balmer, cloud.paschen]
for i, key in enumerate(keys):
value = values[i]
if key not in widgets:
widget_dict[key] = fixed(value)
continue
if show_labels:
label = labels[i]
else:
label = "({})".format(next(letter))
widget_dict[key] = Checkbox(value=value,
description=label,
**widget_kwargs)
self.widget_dict = widget_dict
super().__init__(self.plot, **widget_dict)
def plot(self, z, sigma, n, P, v, t, lyman, balmer, paschen):
self.cloud.z = z
self.cloud.sigma = sigma
self.cloud.n = n
self.cloud.P = P
self.cloud.vsini = v
self.cloud.t = t
self.cloud.lyman = lyman
self.cloud.balmer = balmer
self.cloud.paschen = paschen
flux = self.cloud.line_flux(self.wv)
plt.plot(self.wv, flux)
plt.xlabel("Wavelength [nm]")
plt.ylabel("Flux density [arbitrary units]")
# plt.ylabel(r"Flux density [erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$]")
plt.ylim(0, self.ymax)
plt.show()
class MultiCloudInteractive(interactive):
"""
Interactive with multiple clouds.
Parameters
----------
wvmin : minimum wavelength in nm
wvmax : maximum wavelength in nm
clouds : list of Cloud objects
show_labels : bool, if True include labels in the widgets
widgets : iterable of strings, indicating which widgets to construct
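Examples
--------
A sketch with two independent clouds (notebook-oriented, as above):

>>> clouds = [Cloud(0.00, 50.0, 0.05), Cloud(0.02, 80.0, 0.03)]
>>> mci = MultiCloudInteractive(380.0, 700.0, clouds)  # doctest: +SKIP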
"""
def __init__(self, wvmin, wvmax, clouds, show_labels=True,
widgets=('z', 'sigma', 'n', 'lyman', 'balmer', 'paschen'),
zmin=0.00, zmax=0.10,
smin=1, smax=500,
nmin=0, nmax=0.1,
Pmin=0, Pmax=10,
vmin=1, vmax=100,
tmin=0, tmax=20):
self.clouds = clouds
self.ncomponents = len(clouds)
dv = (smax + smin) / 8.0
dlam = dv / c * (wvmax - wvmin) / 2.0
wv = np.linspace(wvmin, wvmax, int((wvmax - wvmin) / dlam))
self.wv = wv
# set max height of graph
cloud = clouds[0]
old_sigma = cloud.sigma
old_n = cloud.n
cloud.sigma = (smin + smax) / 2.0
cloud.n = nmax
self.ymax = 1.1 * np.amax(cloud.line_flux(wv))
cloud.sigma = old_sigma
cloud.n = old_n
# construct widget dictionary
widget_dict = {}
letter = cycle(ascii_lowercase)
for i, cloud in enumerate(self.clouds):
# float sliders
keys = ['z', 'sigma', 'n', 'P', 'v', 't']
labels = ['Redshift: ', 'Dispersion: ', 'Density: ',
'Period: ', 'Amplitude: ', 'Time: ']
widget_kwargs = {"disabled": False,
"continuous_update": False,
"orientation": "horizontal",
"readout": True,
"readout_format": ".4f"}
values = [cloud.z, cloud.sigma, cloud.n,
cloud.P, cloud.vsini, cloud.t]
bounds = [(zmin, zmax), (smin, smax), (nmin, nmax),
(Pmin, Pmax), (vmin, vmax), (tmin, tmax)]
for j, key in enumerate(keys):
value = values[j]
if key not in widgets:
widget_dict[key + str(i)] = fixed(value)
continue
if show_labels:
label = labels[j]
else:
label = "({})".format(next(letter))
lower, upper = bounds[j]
widget_dict[key + str(i)] = FloatSlider(value=value,
min=lower,
max=upper,
step=(upper - lower) / 100,
description=label,
**widget_kwargs)
# boolean checkboxes
keys = ['lyman', 'balmer', 'paschen']
labels = [s.capitalize() + ": " for s in keys]
widget_kwargs = {"disabled": False}
values = [cloud.lyman, cloud.balmer, cloud.paschen]
for j, key in enumerate(keys):
value = values[j]
if key not in widgets:
widget_dict[key + str(i)] = fixed(value)
continue
if show_labels:
label = labels[j]
else:
label = "({})".format(next(letter))
widget_dict[key + str(i)] = Checkbox(value=value,
description=label,
**widget_kwargs)
super().__init__(self.plot, **widget_dict)
def plot(self, **kwargs):
flux = np.zeros(self.wv.shape)
for i, cloud in enumerate(self.clouds):
cloud.z = kwargs['z' + str(i)]
cloud.sigma = kwargs['sigma' + str(i)]
cloud.n = kwargs['n' + str(i)]
cloud.P = kwargs['P' + str(i)]
cloud.vsini = kwargs['v' + str(i)]
cloud.t = kwargs['t' + str(i)]
cloud.lyman = kwargs['lyman' + str(i)]
cloud.balmer = kwargs['balmer' + str(i)]
cloud.paschen = kwargs['paschen' + str(i)]
flux += cloud.line_flux(self.wv)
plt.plot(self.wv, flux)
plt.xlabel("Wavelength [nm]")
plt.ylabel("Flux density [arbitrary units]")
# plt.ylabel(r"Flux density [erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$]")
plt.ylim(0, self.ymax)
plt.show()
| gpl-3.0 |
Chuban/moose | python/mooseutils/MooseDataFrame.py | 8 | 2614 | import os
import pandas
import message
class MooseDataFrame(object):
"""
A wrapper for handling data from a single csv file.
This utilizes a pandas.DataFrame for storing and accessing CSV data, while
allowing for the file to exist/not-exist.
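For example (the csv path and column name here are hypothetical):

    data = MooseDataFrame('out.csv', index='time')
    if data.update() == MooseDataFrame.UPDATED:
        energy = data['total_energy']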
"""
NOCHANGE = 0
INVALID = 1
OLDFILE = 2
UPDATED = 3
def __init__(self, filename, index=None, run_start_time=None):
self.filename = filename
self.modified = None
self.data = pandas.DataFrame()
self._index = index
self._run_start_time = run_start_time
self.update()
def __getitem__(self, key):
"""
Provides [] access to data.
Args:
key[str|list]: The key(s) to extract.
"""
if self.data.empty:
return pandas.Series()
return self.data[key]
def __contains__(self, key):
"""
Test if a key is stored in the data.
"""
return (key in self.data)
def __nonzero__(self):
"""
Return False if the data is empty.
"""
return not self.data.empty
def clear(self):
"""
Remove existing data.
"""
self.modified = None
self.data = pandas.DataFrame()
def update(self):
"""
Re-read the csv file if necessary and return one of NOCHANGE, OLDFILE, INVALID, or UPDATED.
"""
retcode = MooseDataFrame.NOCHANGE
file_exists = os.path.exists(self.filename)
if file_exists and (os.path.getmtime(self.filename) < self._run_start_time):
self.clear()
message.mooseDebug("The csv file {} exists but is old compared to the run start time.".format(self.filename), debug=True)
retcode = MooseDataFrame.OLDFILE
elif not os.path.exists(self.filename):
self.clear()
message.mooseDebug("The csv file {} does not exist.".format(self.filename))
retcode = MooseDataFrame.INVALID
else:
modified = os.path.getmtime(self.filename)
if modified != self.modified:
retcode = MooseDataFrame.UPDATED
try:
self.modified = modified
self.data = pandas.read_csv(self.filename)
if self._index:
self.data.set_index(self._index, inplace=True)
message.mooseDebug("Reading csv file: {}".format(self.filename))
except:
self.clear()
message.mooseDebug("Unable to read file {} it likely does not contain data.".format(self.filename))
return retcode
| lgpl-2.1 |
Cadair/ginga | ginga/mplw/FigureCanvasQt.py | 5 | 2413 | #
# FigureCanvasQt.py -- classes for the display of FITS files in a
# Matplotlib FigureCanvas
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
from __future__ import print_function
from ginga.toolkit import toolkit
if toolkit == 'qt5':
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as QtFigureCanvas
else:
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as QtFigureCanvas
from ginga.qtw.QtHelp import QtGui, QtCore
def setup_Qt(widget, viewer):
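"""Apply Ginga-oriented settings to a Qt Matplotlib canvas: focus, mouse
tracking and drag-and-drop policies, plus a resize hook that forwards the
new size to viewer.configure_window when a viewer is given."""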
def resizeEvent(*args):
rect = widget.geometry()
x1, y1, x2, y2 = rect.getCoords()
width = x2 - x1
height = y2 - y1
if viewer is not None:
viewer.configure_window(width, height)
widget.setFocusPolicy(QtCore.Qt.FocusPolicy(
QtCore.Qt.TabFocus |
QtCore.Qt.ClickFocus |
QtCore.Qt.StrongFocus |
QtCore.Qt.WheelFocus))
widget.setMouseTracking(True)
widget.setAcceptDrops(True)
# Matplotlib has a bug where resize events are not reported
widget.connect(widget, QtCore.SIGNAL('resizeEvent()'),
resizeEvent)
class FigureCanvas(QtFigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).
"""
def __init__(self, fig, parent=None, width=5, height=4, dpi=100):
QtFigureCanvas.__init__(self, fig)
self.viewer = None
setup_Qt(self, None)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def resizeEvent(self, event):
rect = self.geometry()
x1, y1, x2, y2 = rect.getCoords()
width = x2 - x1
height = y2 - y1
if self.viewer is not None:
self.viewer.configure_window(width, height)
return super(FigureCanvas, self).resizeEvent(event)
def sizeHint(self):
width, height = 300, 300
if self.viewer is not None:
width, height = self.viewer.get_desired_size()
return QtCore.QSize(width, height)
def set_viewer(self, viewer):
self.viewer = viewer
#END
| bsd-3-clause |
mblondel/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 23 | 8317 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
"""Incremental PCA on dense arrays."""
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
"""Test that the projection of data is correct."""
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
"""Test that the projection of data can be inverted."""
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
"""Test that n_components is >=1 and <= n_features."""
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
"""Test that components_ sign is stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
"""Test that changing n_components will raise an error."""
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
"""Test that components_ sign is stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
"""Test that components_ values are stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
"""Test that fit and partial_fit get equivalent results."""
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
"""Test that IncrementalPCA and PCA are approximate (to a sign flip)."""
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
"""Test that IncrementalPCA and PCA are approximate (to a sign flip)."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
"""Test that PCA and IncrementalPCA calculations match"""
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
"""Test that PCA and IncrementalPCA transforms match to sign flip."""
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
twdb/tbtools | tbtools/ptrac/read.py | 2 | 2700 | import pandas as pd
import numpy as np
import os
import utm
from datetime import timedelta
from .. import read
def release(path):
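"""Read input.Ptrac from path and return the release date as (year, month, day)."""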
fin = open(os.path.join(path, 'input.Ptrac'), 'r')
s = fin.readline()
while 'release year' not in s:
s = fin.readline()
yr = int(s.split(',')[0])
s = fin.readline()
mth = int(s.split(',')[0])
s = fin.readline()
day = int(s.split(',')[0])
print('Release Date: {}-{}-{}'.format(yr, mth, day))
return yr, mth, day
def particles(path, zone_number):
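"""Read the particles*.w track files under path and return two DataFrames
(longitudes, latitudes), one column per particle. Assumes ten files of 100
particles each, tracked every 30 minutes for 28 days from the release date
in input.Ptrac, with x/y offsets converted from UTM in the given zone_number."""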
yr, mth, day = release(path)
coords = read.coords(os.path.join(path, 'input'), zone_number, 'utm')
xMin = coords.easting.min()
yMin = coords.northing.min()
print('xmin = {}\nymin = {}'.format(xMin, yMin))
fils = ['particles1.w', 'particles2.w', 'particles3.w', 'particles4.w',
'particles5.w', 'particles6.w', 'particles7.w', 'particles8.w',
'particles9.w', 'particles10.w']
start = pd.datetime(yr, mth, day)
end = start + timedelta(days=28)
drange = pd.date_range(start, end, freq='30T')
cols = np.arange(1, 1001, 1)
partsLon = pd.DataFrame(0., index=drange, columns=cols)
partsLat = pd.DataFrame(0., index=drange, columns=cols)
with_pnum = True
f_test = open(os.path.join(path, fils[0]), 'r')
if len(f_test.readline().split()) == 10:
with_pnum = False
f_test.close()
iter = 0
for f in fils:
print('\nReading {}'.format(os.path.join(path, f)))
if with_pnum:
tmp = pd.read_csv(os.path.join(path, f), delim_whitespace=True,
header=None, parse_dates=[[1, 2, 3, 4]],
usecols=[0, 1, 2, 3, 4, 5, 6])
tmp.columns = ['date', 'particle', 'x', 'y']
else:
tmp = pd.read_csv(os.path.join(path, f), delim_whitespace=True,
header=None, parse_dates=[[0, 1, 2, 3]],
usecols=[0, 1, 2, 3, 4, 5])
tmp.columns = ['date', 'x', 'y']
nd = len(tmp['date'].unique())
tmp['particle'] = list(np.arange(1, 101) + 100 * iter) * nd
print('Converting from UTM to lat/lon')
x = tmp.x + xMin
y = tmp.y + yMin
lat, lon = utm.to_latlon(x, y, zone_number, 'R')
tmp['lon'] = lon
tmp['lat'] = lat
tmpLat = tmp.pivot(index='date', columns='particle', values='lat')
tmpLon = tmp.pivot(index='date', columns='particle', values='lon')
partsLon.ix[tmpLon.index, tmpLon.columns] = tmpLon
partsLat.ix[tmpLat.index, tmpLat.columns] = tmpLat
iter += 1
return partsLon, partsLat
| mit |
RomainBrault/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
markovg/nest-simulator | testsuite/manualtests/stdp_check.py | 4 | 4619 | # -*- coding: utf-8 -*-
#
# stdp_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
# Test script to reproduce changes in weight of a STDP synapse in an
# event-driven way. Pre- and post-synaptic spike trains are read in from
# spike_detector-0-0-3.gdf (output of test_stdp_poiss.sli).
# output: pre/post \t spike time \t weight
#
# Synaptic dynamics for STDP synapses according to Abigail Morrison's
# STDP model (see stdp_rec.pdf).
#
# first version: Moritz Helias, april 2006
# adapted to python MH, SK, May 2008
def stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus,
lmbd, tau_plus, tau_minus, delay):
w = w_init # initial weight
i = 0 # index of next presynaptic spike
j = 0 # index of next postsynaptic spike
K_plus = 0.
K_minus = 0.
last_t = 0.
advance = True
while advance:
advance = False
# next spike is presynaptic
if pre_spikes[i] < post_spikes[j]:
dt = pre_spikes[i] - last_t
# evolve exponential filters
K_plus *= exp(-dt / tau_plus)
K_minus *= exp(-dt / tau_minus)
# depression
w = w / w_max - lmbd * alpha * (w / w_max) ** mu_minus * K_minus
if w > 0.:
w *= w_max
else:
w = 0.
print("pre\t%.16f\t%.16f" % (pre_spikes[i], w))
K_plus += 1.
last_t = pre_spikes[i] # time evolved until here
if i < len(pre_spikes) - 1:
i += 1
advance = True
# same timing of next pre- and postsynaptic spike
elif pre_spikes[i] == post_spikes[j]:
dt = pre_spikes[i] - last_t
# evolve exponential filters
K_plus *= exp(-dt / tau_plus)
K_minus *= exp(-dt / tau_minus)
# facilitation
w = w / w_max + lmbd * (1. - w / w_max) ** mu_plus * K_plus
if w < 1.:
w *= w_max
else:
w = w_max
print("post\t%.16f\t%.16f" % (post_spikes[j] - delay, w))
# depression
w = w / w_max - lmbd * alpha * (w / w_max) ** mu_minus * K_minus
if w > 0.:
w *= w_max
else:
w = 0.
print("pre\t%.16f\t%.16f" % (pre_spikes[i], w))
K_plus += 1.
K_minus += 1.
last_t = pre_spikes[i] # time evolved until here
if i < len(pre_spikes) - 1:
i += 1
advance = True
if j < len(post_spikes) - 1:
j += 1
advance = True
# next spike is postsynaptic
else:
dt = post_spikes[j] - last_t
# evolve exponential filters
K_plus *= exp(-dt / tau_plus)
K_minus *= exp(-dt / tau_minus)
# facilitation
w = w / w_max + lmbd * (1. - w / w_max) ** mu_plus * K_plus
if w < 1.:
w *= w_max
else:
w = w_max
print("post\t%.16f\t%.16f" % (post_spikes[j] - delay, w))
K_minus += 1.
last_t = post_spikes[j] # time evolved until here
if j < len(post_spikes) - 1:
j += 1
advance = True
return w
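# Quick self-contained sanity check (hypothetical spike trains, not the ones
# from test_stdp_poiss.sli): regular pre/post trains can be fed directly into
# stdp() with the parameter values defined below, e.g.
#
# pre = arange(10., 500., 20.) # presynaptic spikes every 20 ms
# post = arange(15., 500., 20.) + 1. # postsynaptic spikes shifted by the 1 ms dendritic delay
# stdp(35., 70., pre, post, .95, .05, .05, .025, 20., 20., 1.)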
# stdp parameters
w_init = 35.
w_max = 70.
alpha = .95
mu_plus = .05
mu_minus = .05
lmbd = .025
tau_plus = 20.
tau_minus = 20.
# dendritic delay
delay = 1.
# load spikes from simulation with test_stdp_poiss.sli
spikes = load("spike_detector-0-0-3.gdf")
pre_spikes = spikes[find(spikes[:, 0] == 5), 1]
# delay is purely dendritic
# postsynaptic spike arrives at sp_j + delay at the synapse
post_spikes = spikes[find(spikes[:, 0] == 6), 1] + delay
# calculate development of stdp weight
stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd,
tau_plus, tau_minus, delay)
| gpl-2.0 |
nvoron23/scipy | scipy/integrate/quadrature.py | 25 | 27849 | from __future__ import division, print_function, absolute_import
__all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb',
'cumtrapz','newton_cotes']
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from numpy import sum, ones, add, diff, isinf, isscalar, \
asarray, real, trapz, arange, empty
import numpy as np
import math
import warnings
from scipy._lib.six import xrange
class AccuracyWarning(Warning):
pass
def _cached_p_roots(n):
"""
Cache p_roots results for speeding up multiple calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
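Examples
--------
For instance, a 5-point rule already integrates a smooth integrand on a
finite interval very accurately (the second return value is always None):

>>> from scipy import integrate
>>> import numpy as np
>>> val, _ = integrate.fixed_quad(np.sin, 0.0, np.pi / 2, n=5)
>>> bool(abs(val - 1.0) < 1e-9)
True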
"""
[x,w] = _cached_p_roots(n)
x = real(x)
ainf, binf = map(isinf,(a,b))
if ainf or binf:
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0*sum(w*func(y,*args),0), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if isscalar(x):
return func(x, *args)
x = asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
if hasattr(y0, 'dtype'):
output = empty((n,), dtype=y0.dtype)
else:
output = empty((n,), dtype=type(y0))
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
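Examples
--------
As a quick sanity check, the integral of cos over [0, pi/4] is sin(pi/4):

>>> from scipy import integrate
>>> import numpy as np
>>> val, err = integrate.quadrature(np.cos, 0.0, np.pi / 4)
>>> bool(abs(val - np.sin(np.pi / 4)) < 1e-7)
True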
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : int, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = asarray(y)
if x is None:
d = dx
else:
x = asarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = add.accumulate(d * (y[slice1] + y[slice2]) / 2.0, axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
all = (slice(None),)*nd
slice0 = tupleset(all, axis, slice(start, stop, step))
slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = add.reduce(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = diff(x,axis=axis)
sl0 = tupleset(all, axis, slice(start, stop, step))
sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1)),axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : {'avg', 'first', 'last'}, optional
'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
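    Examples
    --------
    A minimal illustrative sketch (assumes ``np`` is NumPy; the value in the
    comment is approximate). With an odd number of equally spaced samples a
    quadratic is integrated exactly:
    >>> from scipy import integrate
    >>> x = np.linspace(0, 1, 11)
    >>> y = x**2
    >>> result = integrate.simps(y, x)   # ~0.333333, exact here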
"""
y = asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = asarray(x)
if len(x.shape) == 1:
            shapex = ones(nd, dtype=int)
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be 'avg', 'last', or 'first'.")
        # Simpson's rule on the first N-2 intervals, trapezoidal rule on the last interval
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y,0,N-3,x,dx,axis)
        # Simpson's rule on the last N-2 intervals, trapezoidal rule on the first interval
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y,1,N-2,x,dx,axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y,0,N-2,x,dx,axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
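    Examples
    --------
    A minimal illustrative sketch (assumes ``np`` is NumPy; the value in the
    comment is approximate). `romb` requires ``2**k + 1`` equally spaced
    samples:
    >>> from scipy import integrate
    >>> x = np.linspace(0, 1, 9)                    # 2**3 + 1 samples
    >>> y = x**3
    >>> result = integrate.romb(y, dx=x[1] - x[0])  # ~0.25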
"""
y = asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
all = (slice(None),) * nd
slice0 = tupleset(all, axis, 0)
slicem1 = tupleset(all, axis, -1)
h = Ninterv*asarray(dx)*1.0
R[(0,0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = all
start = stop = step = Ninterv
for i in range(1,k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start,stop,step))
step >>= 1
R[(i,0)] = 0.5*(R[(i-1,0)] + h*add.reduce(y[slice_R],axis))
for j in range(1,i+1):
R[(i,j)] = R[(i,j-1)] + \
(R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*j))-1)
h = h / 2.0
if show:
if not isscalar(R[(0,0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%" + str(width) + '.' + str(precis)+'f'
print("\n Richardson Extrapolation Table for Romberg Integration ")
print("====================================================================")
for i in range(0,k+1):
for j in range(0,i+1):
print(formstr % R[(i,j)], end=' ')
print()
print("====================================================================\n")
return R[(k,k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
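    A rough illustrative sketch (the variable names are only for this example):
    >>> f = lambda t: t**2
    >>> s1 = _difftrap(f, [0.0, 1.0], 1)   # 0.5*(f(0) + f(1)) = 0.5
    >>> s2 = _difftrap(f, [0.0, 1.0], 2)   # new midpoint ordinate, f(0.5) = 0.25
    The h = 1 trapezoid estimate is ``1.0 * s1`` and the refined h = 0.5
    estimate is ``0.5 * (s1 + s2)``; only the new ordinate is recomputed.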
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2  # number of new ordinates to evaluate
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * arange(0, numtosum)
s = sum(function(points),0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
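    For illustration, combining the trapezoid estimates 0.5 (h = 1) and
    0.375 (h = 0.5) for the integral of x**2 over [0, 1] reproduces the
    Simpson estimate of 1/3:
    >>> refined = _romberg_diff(0.5, 0.375, 1)   # ~0.333333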
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
    args : tuple, optional
        Extra arguments to pass to `function`. Each element of `args` will
        be passed as a single argument to `function`. Default is to pass no
        extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
        Whether `function` handles arrays as arguments (i.e., whether it is a
        "vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if isinf(a) or isinf(b):
raise ValueError("Romberg integration only available for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a,b]
intrange = b-a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
for i in xrange(1, divmax+1):
n = n * 2
ordsum = ordsum + _difftrap(vfunc, interval, n)
resmat.append([])
resmat[i].append(intrange * ordsum / n)
for k in range(i):
resmat[i].append(_romberg_diff(resmat[i-1][k], resmat[i][k], k+1))
result = resmat[i][i]
lastresult = resmat[i-1][i-1]
err = abs(result - lastresult)
if err < tol or err < rtol*abs(result):
break
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# The sample points are used to construct the local interpolating polynomial;
# a are the weights for Newton-Cotes integration and
# B is the error coefficient.
# The error in these coefficients grows as N gets larger
# or as the samples get closer together.
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
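# For example (an illustrative sketch), the N = 2 entry reproduces
# Simpson's rule:
#
#   num_a, den_a, int_a, num_B, den_B = _builtincoeffs[2]  # (1, 3, [1, 4, 1], -1, 90)
#   a = num_a*array(int_a)/den_a   # -> array([1/3, 4/3, 1/3])
#   B = num_B*1.0 / den_B          # -> -1/90
#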
_builtincoeffs = {
1:(1,2,[1,1],-1,12),
2:(1,3,[1,4,1],-1,90),
3:(3,8,[1,3,3,1],-3,80),
4:(2,45,[7,32,12,32,7],-8,945),
5:(5,288,[19,75,50,50,75,19],-275,12096),
6:(1,140,[41,216,27,272,27,216,41],-9,1400),
7:(7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8:(4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9:(9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10:(5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11:(11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12:(1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13:(13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14:(7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]` and :math:`\\Delta x = \\frac{x_N-x_0}{N}`
    is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
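    Examples
    --------
    A minimal illustrative sketch (assumes ``np`` is NumPy; the value in the
    comment is approximate). The N = 3 rule (Simpson's 3/8 rule) integrates a
    cubic over [0, 1] exactly:
    >>> from scipy import integrate
    >>> an, B = integrate.newton_cotes(3, 1)
    >>> x = np.linspace(0, 1, 4)
    >>> dx = x[1] - x[0]
    >>> approx = dx * np.sum(an * x**3)   # ~0.25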
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
    except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
return na*np.array(vi,float)/da, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2.0*yi - 1
nvec = np.arange(0,N+1)
C = ti**nvec[:,np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = np.dot(Cinv[:,::2],vec) * N/2
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except Exception:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |